Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[RomApp] Take into account non converged solutions in Linear and NonLinear (ANN-Enhanced) Decoders #12572

Merged
merged 3 commits into from
Jul 24, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 6 additions & 3 deletions applications/RomApplication/python_scripts/rom_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -365,7 +365,7 @@ def _LaunchTrainROM(self, mu_train):
This method should be parallel capable
"""
self._LaunchFOM(mu_train)
self._LauchComputeSolutionBasis(mu_train)
self._LaunchComputeSolutionBasis(mu_train)



Expand Down Expand Up @@ -401,11 +401,14 @@ def _LaunchFOM(self, mu_train, gid_and_vtk_name='FOM_Fit'):



def _LauchComputeSolutionBasis(self, mu_train):
def _LaunchComputeSolutionBasis(self, mu_train):
in_database, hash_basis = self.data_base.check_if_in_database("RightBasis", mu_train)
if not in_database:
BasisOutputProcess = self.InitializeDummySimulationForBasisOutputProcess()
u,sigma = BasisOutputProcess._ComputeSVD(self.data_base.get_snapshots_matrix_from_database(mu_train)) #Calling the RomOutput Process for creating the RomParameter.json
if self.general_rom_manager_parameters["ROM"]["use_non_converged_sols"].GetBool():
            u,sigma = BasisOutputProcess._ComputeSVD(self.data_base.get_snapshots_matrix_from_database(mu_train, table_name='NonconvergedFOM')) #TODO this might be too large for a single operation; add partitioned svd
else:
u,sigma = BasisOutputProcess._ComputeSVD(self.data_base.get_snapshots_matrix_from_database(mu_train, table_name='FOM'))
BasisOutputProcess._PrintRomBasis(u, sigma) #Calling the RomOutput Process for creating the RomParameter.json
self.data_base.add_to_database("RightBasis", mu_train, u )
self.data_base.add_to_database("SingularValues_Solution", mu_train, sigma )
Expand Down
42 changes: 12 additions & 30 deletions applications/RomApplication/python_scripts/rom_nn_trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ def __init__(self, phisig_norm_matrix, rescaling_factor, w_gradNN, *args, **kwar
super(ANNPROM_Keras_Model,self).__init__(*args, **kwargs)

self.run_eagerly = False

self.phisig_norm_matrix = phisig_norm_matrix
self.w_gradNN = w_gradNN
self.rescaling_factor_x = rescaling_factor
Expand All @@ -31,7 +31,7 @@ def __init__(self, phisig_norm_matrix, rescaling_factor, w_gradNN, *args, **kwar

def train_step(self,data):
input_batch, x_true_batch = data # target_aux is the reference force or residual, depending on the settings

with tf.GradientTape() as tape_d:

with tf.GradientTape() as tape_e:
Expand Down Expand Up @@ -84,7 +84,7 @@ def metrics(self):
# or at the start of `evaluate()`.
# If you don't implement this property, you have to call
# `reset_states()` yourself at the time of your choosing.

return [self.loss_tracker, self.loss_x_tracker, self.loss_gradNN_tracker]


Expand Down Expand Up @@ -129,29 +129,11 @@ def _GetTrainingData(self, n_inf, n_sup):
Q_sup_val = (phisig_inv_sup@S_val).T
Q_inf_train_original = Q_inf_train.copy()

UseNonConvergedSolutionsGathering = self.general_rom_manager_parameters["ROM"]["use_non_converged_sols"].GetBool()
if UseNonConvergedSolutionsGathering:
#fetching nonconverged sols for enlarginign training samples in ann enhanced prom
conn = sqlite3.connect(self.data_base.database_name)
cursor = conn.cursor()
for mu in self.mu_train:
hash_mu, _ = self.data_base.get_hashed_file_name_for_table('NonconvergedFOM', mu)
cursor.execute(f"SELECT file_name FROM {'NonconvergedFOM'} WHERE file_name = ?", (hash_mu,))
result = cursor.fetchone()
if result:
file_name = result[0]
data = self.data_base.get_single_numpy_from_database(file_name)
max_number_of_nonconverged_sols = 1000 #making sure not all data is contained
number_of_cols = data.shape[1]

if True: #use all data !!! data.shape[1] <= max_number_of_nonconverged_sols:
pass
else:
indices = np.linspace(0, number_of_cols - 1, max_number_of_nonconverged_sols).astype(int)
data = data[:, indices]

Q_inf_train = np.r_[Q_inf_train, (phisig_inv_inf@data).T]
Q_sup_train = np.r_[Q_sup_train, (phisig_inv_sup@data).T]
if self.general_rom_manager_parameters["ROM"]["use_non_converged_sols"].GetBool():
#fetching nonconverged sols for enlarging training samples in ann enhanced prom
            data = self.data_base.get_snapshots_matrix_from_database(self.mu_train, table_name='NonconvergedFOM') #TODO this might be too large. Add a partitioned approach or a size limit
Q_inf_train = np.r_[Q_inf_train, (phisig_inv_inf@data).T]
Q_sup_train = np.r_[Q_sup_train, (phisig_inv_sup@data).T]

phisig_norm_matrix = phisig_sup.T @ phisig_sup

Expand Down Expand Up @@ -213,7 +195,7 @@ def lr_sgdr_scheduler(epoch, lr):
schedulers_dict={"const": lr_const_scheduler, "steps": lr_steps_scheduler, "sgdr": lr_sgdr_scheduler}

return schedulers_dict[strategy_name]

def _DefineNetwork(self, n_inf, n_sup, layers_size, phisig_norm_matrix=None, rescaling_factor=None, w_gradNN=None):
input_layer=layers.Input((n_inf,), dtype=tf.float64)
layer_out=input_layer
Expand All @@ -223,7 +205,7 @@ def _DefineNetwork(self, n_inf, n_sup, layers_size, phisig_norm_matrix=None, res

network=ANNPROM_Keras_Model(phisig_norm_matrix, rescaling_factor, w_gradNN, input_layer, output_layer)
return network

def _SaveWeightsKratosFormat(self, network, weights_path):
layers=[]
for layer in network.trainable_variables:
Expand Down Expand Up @@ -256,7 +238,7 @@ def TrainNetwork(self, seed=None):

Q_inf_train, Q_inf_val, Q_sup_train, Q_sup_val, phisig_norm_matrix, rescaling_factor = self._GetTrainingData(n_inf, n_sup)

network = self._DefineNetwork(n_inf, n_sup, layers_size, phisig_norm_matrix, rescaling_factor, w_gradNN)
network = self._DefineNetwork(n_inf, n_sup, layers_size, phisig_norm_matrix, rescaling_factor, w_gradNN)

# def scaled_phinorm_mse_loss(y_true, y_pred):
# y_diff=y_true-y_pred
Expand Down Expand Up @@ -293,7 +275,7 @@ def TrainNetwork(self, seed=None):
network.save_weights(str(model_path)+"/model.weights.h5")
with open(str(model_path)+"/history.json", "w") as history_file:
json.dump(str(history.history), history_file)

self._SaveWeightsKratosFormat(network, str(model_path)+"/model_weights.npy")

def EvaluateNetwork(self):
Expand Down
Loading