diff --git a/2023_ModelFLOWsapp_Hetheringtonetal.pdf b/2023_ModelFLOWsapp_Hetheringtonetal.pdf deleted file mode 100644 index 1891089..0000000 Binary files a/2023_ModelFLOWsapp_Hetheringtonetal.pdf and /dev/null differ diff --git a/README.md b/README.md deleted file mode 100644 index 031ef46..0000000 --- a/README.md +++ /dev/null @@ -1,70 +0,0 @@ -# ModelFLOWs-app -Welcome to the ModelFLOWs-app repository! - -## Table of contents -* [About ModelFLOWs-app](#about-modelflows-app) -* [Versions](#versions) -* [Requirements](#requirements) -* [Setup](#setup) -* [Run](#run) -* [Resources](#resources) -* [More info](#more-info) - -## About ModelFLOWs-app -ModelFLOWs-app is an open-source software for data post-processing, pattern identification and the development of reduced-order models using modal decomposition and deep learning architectures. - -## Versions -There are currently two versions available: -* ModelFLOWs-app desktop version -* ModelFLOWs-app web-browser version (demo) - -## Requirements -ModelFLOWs-app has been developed with: -* Python: 3.9 - -Other relevant libraries that require installation: -* tensorflow: 2.10 -* scikit-learn: 1.2 -* ffmpeg: latest version -* hdf5storage: latest version -* numba: 0.56.4 -* scipy: 1.9.3 -* keras-tuner: latest version -* protobuf: 3.20 - -## Setup -There are two ways to install all required libraries: - -#### Option 1. Install all libraries: -``` -$ cd ../v0.1_ModelFLOWs_app -$ sudo pip install -r Requirements.txt -``` - -#### Option 2. Install each library individually: -``` -$ cd ../Desktop -$ sudo pip install **insert library name** -``` - -## Run -To open ModelFLOWs-app, run the following commands: - -#### For the desktop version: -``` -$ cd ../v0.1_ModelFLOWs_app -$ python ModelFLOWs_app.py -``` - -#### For the web-browser demo: -**IMPORTANT** Before running this version, please read the instructions that appear in the *web-browser-version* branch to download the databases used by the ModelFLOWs-app web-browser version. If the databases aren't downloaded correctly, the web-browser version will not work. -``` -$ cd ../v0.1_ModelFLOWs_web -$ streamlit run ModelFLOWs_web.py -``` - -## Resources -Check out the *Tutorials* folder for a more in-depth tutorial explaining how to install, run and use ModelFLOWs-app. This folder also contains advanced tutorials explaining how each module of the application works. - -## More info -For more information please visit [ModelFLOWs-app's official website](https://modelflows.github.io/modelflowsapp/).
You can also find us on [LinkedIn](https://www.linkedin.com/in/company/modelflows/) diff --git a/Resources/2023_UPM_ModelFLOWs_summary.pdf b/Resources/2023_UPM_ModelFLOWs_summary.pdf deleted file mode 100644 index 29aa747..0000000 Binary files a/Resources/2023_UPM_ModelFLOWs_summary.pdf and /dev/null differ diff --git a/Tutorials/Documents/DL_Superresolution.pdf b/Tutorials/Documents/DL_Superresolution.pdf deleted file mode 100644 index 45a2601..0000000 Binary files a/Tutorials/Documents/DL_Superresolution.pdf and /dev/null differ diff --git a/Tutorials/Documents/Deep_Autoencoders.pdf b/Tutorials/Documents/Deep_Autoencoders.pdf deleted file mode 100644 index 2e3de4c..0000000 Binary files a/Tutorials/Documents/Deep_Autoencoders.pdf and /dev/null differ diff --git a/Tutorials/Documents/Full_DL_RNN_CNN.pdf b/Tutorials/Documents/Full_DL_RNN_CNN.pdf deleted file mode 100644 index 92d5f30..0000000 Binary files a/Tutorials/Documents/Full_DL_RNN_CNN.pdf and /dev/null differ diff --git a/Tutorials/Documents/GappySVD.pdf b/Tutorials/Documents/GappySVD.pdf deleted file mode 100644 index 331f4c5..0000000 Binary files a/Tutorials/Documents/GappySVD.pdf and /dev/null differ diff --git a/Tutorials/Documents/HODMD.pdf b/Tutorials/Documents/HODMD.pdf deleted file mode 100644 index 5ed3db6..0000000 Binary files a/Tutorials/Documents/HODMD.pdf and /dev/null differ diff --git a/Tutorials/Documents/HOSVD.pdf b/Tutorials/Documents/HOSVD.pdf deleted file mode 100644 index 7b69e08..0000000 Binary files a/Tutorials/Documents/HOSVD.pdf and /dev/null differ diff --git a/Tutorials/Documents/Hybrid_RNN_CNN.pdf b/Tutorials/Documents/Hybrid_RNN_CNN.pdf deleted file mode 100644 index b033fa7..0000000 Binary files a/Tutorials/Documents/Hybrid_RNN_CNN.pdf and /dev/null differ diff --git a/Tutorials/Documents/MD_Superresolution.pdf b/Tutorials/Documents/MD_Superresolution.pdf deleted file mode 100644 index a7941bc..0000000 Binary files a/Tutorials/Documents/MD_Superresolution.pdf and /dev/null differ diff --git a/Tutorials/Documents/Tutorial_ModelFLOWs-app.pdf b/Tutorials/Documents/Tutorial_ModelFLOWs-app.pdf deleted file mode 100644 index d9bef36..0000000 Binary files a/Tutorials/Documents/Tutorial_ModelFLOWs-app.pdf and /dev/null differ diff --git a/Tutorials/Videos/Autoencoders_Video.mp4 b/Tutorials/Videos/Autoencoders_Video.mp4 deleted file mode 100644 index a3c1f4c..0000000 Binary files a/Tutorials/Videos/Autoencoders_Video.mp4 and /dev/null differ diff --git a/Tutorials/Videos/DL superresolution.mp4 b/Tutorials/Videos/DL superresolution.mp4 deleted file mode 100644 index 42b6272..0000000 Binary files a/Tutorials/Videos/DL superresolution.mp4 and /dev/null differ diff --git a/Tutorials/Videos/Enhancement_Video.mp4 b/Tutorials/Videos/Enhancement_Video.mp4 deleted file mode 100644 index f01bccf..0000000 Binary files a/Tutorials/Videos/Enhancement_Video.mp4 and /dev/null differ diff --git a/Tutorials/Videos/Full_DL_RNN_CNN.mp4 b/Tutorials/Videos/Full_DL_RNN_CNN.mp4 deleted file mode 100644 index 4ba092a..0000000 Binary files a/Tutorials/Videos/Full_DL_RNN_CNN.mp4 and /dev/null differ diff --git a/Tutorials/Videos/GappySVD_Video.mp4 b/Tutorials/Videos/GappySVD_Video.mp4 deleted file mode 100644 index d40fd88..0000000 Binary files a/Tutorials/Videos/GappySVD_Video.mp4 and /dev/null differ diff --git a/Tutorials/Videos/HODMD-video.mp4 b/Tutorials/Videos/HODMD-video.mp4 deleted file mode 100644 index 95f04dd..0000000 Binary files a/Tutorials/Videos/HODMD-video.mp4 and /dev/null differ diff --git 
a/Tutorials/Videos/HOSVD-video.mp4 b/Tutorials/Videos/HOSVD-video.mp4 deleted file mode 100644 index 7ca1a79..0000000 Binary files a/Tutorials/Videos/HOSVD-video.mp4 and /dev/null differ diff --git a/Tutorials/Videos/HybridDL_Video.mp4 b/Tutorials/Videos/HybridDL_Video.mp4 deleted file mode 100644 index b6ff5e5..0000000 Binary files a/Tutorials/Videos/HybridDL_Video.mp4 and /dev/null differ diff --git a/Tutorials/Videos/InstallationGuide.mp4 b/Tutorials/Videos/InstallationGuide.mp4 deleted file mode 100644 index 6dbf09c..0000000 Binary files a/Tutorials/Videos/InstallationGuide.mp4 and /dev/null differ diff --git a/v0.1_ModelFLOWs_web/DLsuperresolution_page.py b/v0.1_ModelFLOWs_web/DLsuperresolution_page.py deleted file mode 100644 index 290648d..0000000 --- a/v0.1_ModelFLOWs_web/DLsuperresolution_page.py +++ /dev/null @@ -1,694 +0,0 @@ -import numpy as np -import pickle -from sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error, r2_score, mean_absolute_percentage_error -import pandas as pd -import matplotlib.pyplot as plt -import time -import tensorflow as tf -from tensorflow.keras.layers import Dense, Reshape, Flatten -from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping -from tensorflow.keras.optimizers import Adam -from tensorflow.keras.models import Model -from tensorflow.keras.layers import Input -from tensorflow.keras.layers import Dense, Flatten -import scipy -import scipy.io -import os -from math import floor -import streamlit as st -import data_fetch - - -def menu(): - tf.keras.backend.set_floatx('float64') - - st.title("DNN Superresolution Model") - st.write(""" -This model is a hybrid, since it combines modal decomposition (precisely, SVD) with deep learning. The decomposed (factorized) tensor is -used to train this model to enhance the resolution of downsampled (lower scaled) data to the same resolution as the ground truth data. 
- """) - st.write(" ## DNN Superresolution Model - Parameter Configuration") - - path0 = os.getcwd() - - def downsampling(): - wantedfile = st.selectbox('Please select a data file', ('DS_30_Tensor_cylinder_Re100.mat', 'DS_30_Tensor.pkl')) - st.info('"DS_30_Tensor_cylinder_Re100.mat" is a downsampled 2D database, while "DS_30_Tensor.pkl" is a downsampled 3D database') - - if wantedfile == 'DS_30_Tensor_cylinder_Re100.mat': - Tensor = data_fetch.fetch_data(path0, wantedfile) - var_sel = np.transpose(Tensor,(3,2,1,0)) - _, ny_sel, nx_sel, _ = var_sel.shape - - - elif wantedfile == 'DS_30_Tensor.pkl': - with open(f'{path0}/{wantedfile}', 'rb') as file: - Tensor=pickle.load(file) - var_sel = np.transpose(Tensor,(4,1,2,3,0)) - _, ny_sel, nx_sel, _, _ = var_sel.shape - - - return var_sel, ny_sel, nx_sel - - def scale_val(x, min_val, max_val): - return((x - min_val)/(max_val - min_val)) - - - def descale_val(x, min_val, max_val): - return(x * (max_val - min_val) + min_val) - - - def custom_scale(tensor): - min_val = np.amin(tensor) - max_val = sum(np.amax(np.abs(tensor),axis=1)) - med_val = np.mean(tensor) - range_val = np.ptp(tensor) - std_val =np.std(tensor) - tensor_norm = XUds / max_val_XUds - - return tensor_norm - - def mean_absolute_percentage_error(y_true, y_pred): - epsilon = 1e-10 - y_true, y_pred = np.array(y_true), np.array(y_pred) - return np.mean(np.abs((y_true - y_pred) / np.maximum(epsilon,np.abs(y_true)))) * 100 - - def smape(A, F): - return ((100.0/len(A)) * np.sum(2 * np.abs(F - A) / (np.abs(A) + np.abs(F))+ np.finfo(float).eps)) - - def RRMSE(Tensor0, Reconst): - RRMSE = np.linalg.norm(np.reshape(Tensor0-Reconst,newshape=(np.size(Tensor0),1)),ord=2)/np.linalg.norm(np.reshape(Tensor0,newshape=(np.size(Tensor0),1))) - return(RRMSE) - - def error_tables(Xr_test, Xr_hat, Xr_sel,tabname1,tabname2, path0, filename, nvar): - results_table = pd.DataFrame(index=['MSE','MAE','MAD','R2','SMAPE','RRMSE', 'MAPE'],columns=['All']) - for i in range(1): - results_table.iloc[0,i] = mean_squared_error( Xr_test.flatten(), Xr_hat.flatten()) - results_table.iloc[1,i] = mean_absolute_error(Xr_test.flatten(), Xr_hat.flatten()) - results_table.iloc[2,i] = median_absolute_error( Xr_test.flatten(), Xr_hat.flatten()) - results_table.iloc[3,i] = r2_score(Xr_test.flatten(), Xr_hat.flatten()) - results_table.iloc[4,i] = smape( Xr_test.flatten(), Xr_hat.flatten()) - results_table.iloc[5,i] = RRMSE( np.reshape(Xr_test,(-1,1)), np.reshape(Xr_hat,(-1,1)))*100 - results_table.iloc[6,i] = mean_absolute_percentage_error( Xr_test.flatten(), Xr_hat.flatten()) - df1 = pd.DataFrame(results_table) - df1.to_csv(f'{path0}/{filename}/{tabname1}.csv') - - num_layers = Xr_sel.shape[3] - - cols = [] - for i in range(1, nvar + 1): - cols.append(f'var{i}') - - results_table2 = pd.DataFrame(index=['MSE','MAE','MAD','R2','SMAPE','RRMSE','MAPE'],columns=cols) - for i in range(num_layers): - results_table2.iloc[0,i] = mean_squared_error( Xr_test[...,i].flatten(), Xr_hat[...,i].flatten()) - results_table2.iloc[1,i] = mean_absolute_error(Xr_test[...,i].flatten(), Xr_hat[...,i].flatten()) - results_table2.iloc[2,i] = median_absolute_error( Xr_test[...,i].flatten(), Xr_hat[...,i].flatten()) - results_table2.iloc[3,i] = r2_score(Xr_test[...,i].flatten(), Xr_hat[...,i].flatten()) - results_table2.iloc[4,i] = smape( Xr_test[...,i].flatten(), Xr_hat[...,i].flatten()) - results_table2.iloc[5,i] = RRMSE( np.reshape(Xr_test[...,i],(-1,1)), np.reshape(Xr_hat[...,i],(-1,1)))*100 - results_table2.iloc[6,i] = mean_absolute_percentage_error( 
Xr_test[...,i].flatten(), Xr_hat[...,i].flatten()) - df2 = pd.DataFrame(results_table2) - df2.to_csv(f'{path0}/{filename}/{tabname2}.csv') - - - return results_table, results_table2 - - def figures_results_ylim(n_snap,var, yg, xg, y, x, Xr_test, Xr_sel, Xr_hat, figname): - fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(22, 5)) - - fig.tight_layout() - - im1 = ax[0].contourf(yg,xg,np.transpose(Xr_test[n_snap,...,var],(1,0)), - vmin=np.min(np.transpose(Xr_test[n_snap,...,var],(1,0))), - vmax=np.max(np.transpose(Xr_test[n_snap,...,var],(1,0)))) - #ax[0].set_ylim(bottom=0,top=16) - ax[0].set_axis_off() - ax[0].set_title('Ground truth', fontsize = 14) - - im2 = ax[1].contourf(y,x,np.transpose(Xr_sel[n_snap,...,var],(1,0)), - vmin=np.min(np.transpose(Xr_test[n_snap,...,var],(1,0))), - vmax=np.max(np.transpose(Xr_test[n_snap,...,var],(1,0)))) - #ax[1].set_ylim(bottom=0,top=16) - ax[1].set_axis_off() - ax[1].set_title('Initial data', fontsize = 14) - - im3 = ax[2].contourf(yg,xg,np.transpose(Xr_hat[n_snap,...,var],(1,0)), - vmin=np.min(np.transpose(Xr_test[n_snap,...,var],(1,0))), - vmax=np.max(np.transpose(Xr_test[n_snap,...,var],(1,0)))) - #ax[2].set_ylim(bottom=0,top=16) - ax[2].set_axis_off() - ax[2].set_title('Enhanced data', fontsize = 14) - - fig.tight_layout() - plt.savefig(figname) - if n_snap == 0: - st.pyplot(fig) - - def videos_results(videoname, var, n_train, test_ind,xg, yg, x, y, Xr_test, Xr_hat, Xr_sel): - from matplotlib import animation - figure, ax = plt.subplots(nrows=1, ncols=3, figsize=(22, 5)) - figure.tight_layout() - - def animation_function(i): - figure.suptitle(f'Snapshot: {n_train+i}', fontsize=20) - im1 = ax[0].contourf(yg,xg,np.transpose(Xr_test[i,...,var],(1,0)), - vmin=np.min(np.transpose(Xr_test[0,...,var],(1,0))), - vmax=np.max(np.transpose(Xr_test[0,...,var],(1,0)))) - ax[0].set_axis_off() - ax[0].set_title('Ground truth', fontsize = 14) - - im2 = ax[1].contourf(y,x,np.transpose(Xr_sel[i,...,var],(1,0)), - vmin=np.min(np.transpose(Xr_test[0,...,var],(1,0))), - vmax=np.max(np.transpose(Xr_test[0,...,var],(1,0)))) - ax[1].set_axis_off() - ax[1].set_title('Original data', fontsize = 14) - - im3 = ax[2].contourf(yg,xg,np.transpose(Xr_hat[i,...,var],(1,0)), - vmin=np.min(np.transpose(Xr_test[0,...,var],(1,0))), - vmax=np.max(np.transpose(Xr_test[0,...,var],(1,0)))) - ax[2].set_axis_off() - ax[2].set_title('Enhanced data', fontsize = 14) - - figure.subplots_adjust(right=0.8, top=0.9) - cbar_ax = figure.add_axes([0.85, 0.09, 0.01, 0.85]) - cbar = figure.colorbar(im1, cax=cbar_ax) - cbar.ax.tick_params(labelsize=20) - - - anim = animation.FuncAnimation(figure, animation_function, frames=test_ind.size, interval=1000, blit=False) - writergif = animation.PillowWriter(fps=5) - anim.save(videoname, writer=writergif) - - # Showing the video slows down the performance - - # Inputs - # Load database - - - [Xr_sel, ny_sel, nx_sel]=downsampling() - - if Xr_sel.ndim == 4: - Xr = data_fetch.fetch_data(path0, "Tensor_cylinder_Re100.mat") - Xr=np.transpose(Xr,(3,2,1,0)) - nt, ny, nx, nvar = Xr.shape - dims = 'Data2D' - - elif Xr_sel.ndim == 5: - with open(f'{path0}/Tensor.pkl', 'rb') as file: - Tensor=pickle.load(file) - Xr=np.transpose(Tensor,(4,1,2,3,0)) - nt, ny, nx, nz, nvar = Xr.shape - dims = 'Data3D' - - decision1 = st.radio('Apply a first scaling to the input data?', ('No', 'Yes')) - if not decision1 or decision1.strip().lower() in ['n', 'no']: - decision1='no-scaling' - - elif decision1.strip().lower() in ['y', 'yes']: - decision1='scaling' - - if decision1 == 'scaling': - 
decision2 = st.radio('Output scaling graphs?', ('No', 'Yes')) - if not decision2 or decision2.strip().lower() in ['n', 'no']: - decision2 = 'no' - elif decision2.strip().lower() in ['y', 'yes']: - decision2 = 'yes' - - else: - decision2 = 'no' - - decision3 = st.radio('Apply a second scaling to the input data?', ('No', 'Yes')) - if not decision3 or decision3.strip().lower() in ['n', 'no']: - decision3='no' - - elif decision3.strip().lower() in ['y', 'yes']: - decision3='yes' - - decision4 = st.radio('Shuffle train and test data?', ('No', 'Yes')) - if not decision4 or decision4.strip().lower() in ['n', 'no']: - decision4='no-shuffle' - - elif decision4.strip().lower() in ['y', 'yes']: - decision4 = 'shuffle' - - - bs = st.slider('Select the data batch size', min_value = 1, max_value = 32, value = 16, step = 1) - bs = int(bs) - - neurons = st.slider('Select the number of neurons per layer', min_value = 1, max_value = 100, value = 50, step = 1) - neurons = int(neurons) - - act_func = st.selectbox('Select an activation function for the hidden layers', ("elu", "relu", "tanh", "sigmoid", "linear")) - act_fun2 = st.selectbox('Select an activation function for the output layers', ("tanh", "elu", "relu", "linear", "sigmoid")) - - learn_rate = st.number_input('Select the model learning rate', min_value = 0.0001, max_value = 0.1, value = 0.005, step = 0.0001, format = '%.4f') - learn_rate = float(learn_rate) - - loss_function='mse' - - n_train = st.number_input(f'Select the number of samples for the training data. Continue with {round(int(nt)*0.8)} samples (approx. 80%, RECOMMENDED)', min_value = 1, max_value = nt, value = round(int(nt)*0.8), step = 1) - n_train = int(n_train) - - epoch = st.slider('Select the number of training epochs', min_value = 1, max_value = 200, value = 150, step = 10) - epoch = int(epoch) - - decision5 = st.radio('Would you like to save the results?', ('Yes', 'No')) - if not decision5 or decision5.strip().lower() in ['y', 'yes']: - decision5='yes' - - elif decision5.strip().lower() in ['n', 'no']: - decision5 = 'no' - - decision7 = st.radio('Data comparison plot?', ('Yes', 'No')) - if not decision7 or decision7.strip().lower() in ['y', 'yes']: - decision7='yes' - - elif decision7.strip().lower() in ['n', 'no']: - decision7 = 'no' - - decisionx = st.radio('Create comparison videos of the ground truth vs.
the predicted data?', ('No', 'Yes')) - if not decisionx or decisionx.strip().lower() in ['n', 'no']: - decisionx='no' - - elif decisionx.strip().lower() in ['y', 'yes']: - decisionx = 'yes' - - - filename0 = f'DNN_superresolution_solution' - - if not os.path.exists(f'{path0}/{filename0}'): - os.mkdir(f'{path0}/{filename0}') - - if Xr_sel.ndim == 4: - dat = '2DcylinderRe100' - - elif Xr_sel.ndim == 5: - dat = '3Dcylinder' - - filename1 = f'{dat}_{decision1}_{decision4}' - - if not os.path.exists(f'{path0}/{filename0}/{filename1}'): - os.mkdir(f'{path0}/{filename0}/{filename1}') - - filename = f'{filename0}/{filename1}' - - go = st.button('Calculate') - - if go: - with st.spinner('Please wait for the run to complete'): - st.write("") - st.write('Downsampled data summary') - st.write('Xr_sel: ', Xr_sel.shape) - st.write('ny_sel =', ny_sel) - st.write('nx_sel =', nx_sel) - st.write("") - - st.write('Ground truth data summary') - st.write('Xr: ', Xr.shape) - st.write('ny =', ny) - st.write('nx =', nx) - st.write("") - - if Xr.ndim==3: - nt=100 - Xr= np.repeat(Xr[np.newaxis,...],nt, axis=0) - Xr_sel= np.repeat(Xr_sel[np.newaxis,...],nt, axis=0) - - - if dims=='Data3D': - Xr_sel=np.reshape(Xr_sel,(nt,ny_sel,nx_sel*nz,nvar),order='F') - Xr=np.reshape(Xr,(nt,ny,nx*nz,nvar),order='F') - - # SCALING - - if decision1=='scaling': - var_sel_min = [] - var_sel_max = [] - st.write('Scaling data') - for i in range(nvar): - var_sel_min.append(np.min(Xr_sel[...,i])) - var_sel_max.append(np.max(Xr_sel[...,i])) - st.write(f'var{i+1}_sel_min: {var_sel_min[i]}, var{i+1}_sel_max: {var_sel_max[i]}') - - var_sel_min = np.array(var_sel_min) - var_sel_max = np.array(var_sel_max) - - var_sel_sc = [] - for i in range(nvar): - var_sel_sc.append(np.array(scale_val(Xr_sel[...,i], var_sel_min[i], var_sel_max[i]))) - st.write(f'var{i+1}_sel_sc shape: {np.array(var_sel_sc[i]).shape}') - - Xr_sel_sc = np.stack(var_sel_sc, axis=3) - st.write('var_sel_sc shape: ', Xr_sel_sc.shape) - - var_min = [] - var_max = [] - for i in range(nvar): - var_min.append(np.min(Xr[...,i])) - var_max.append(np.max(Xr[...,i])) - st.write(f'var{i+1}_min: {var_min[i]}, var{i+1}_max: {var_max[i]}') - - var_min = np.array(var_min) - var_max = np.array(var_max) - - var_sc = [] - for i in range(nvar): - var_sc.append(scale_val(Xr[...,i], var_min[i], var_max[i])) - st.write(f'var{i+1}_sel_sc shape: ',var_sc[i].shape) - - var_sc = np.array(var_sc) - Xr_sc = np.stack(var_sc, axis=3) - st.write('var_sel_sc shape: ', Xr_sc.shape) - - else: - None - - if decision2 == 'yes': - fig, ax = plt.subplots() - - ax.hist(Xr.flatten(), bins='auto') - ax.set_title('Ground truth data distribution') - ax.set_ylim(0,2e5) - ax.set_xlim(-5,5) - st.pyplot(fig) - - fig, ax = plt.subplots() - ax.set_title('Scaled downsampled data distribution') - ax.hist(Xr_sc.flatten(), bins='auto') - st.pyplot(fig) - - # SVD - - if 'nz' in locals(): - XUds=np.zeros([nt,ny_sel,ny_sel,nvar]) - XSds=np.zeros([nt,ny_sel,ny_sel,nvar]) - XVds=np.zeros([nt,ny_sel,nx_sel*nz,nvar]) - else: - XUds=np.zeros([nt,ny_sel,nx_sel,nvar]) - XSds=np.zeros([nt,nx_sel,nx_sel,nvar]) - XVds=np.zeros([nt,nx_sel,nx_sel,nvar]) - - if 'Xr_sel_sc' in locals(): - for i in range(np.size(Xr_sel_sc,0)): - U_var_sel_sc = [] - S_var_sel_sc = [] - V_var_sel_sc = [] - for j in range(nvar): - - U, S, V = np.linalg.svd(Xr_sel_sc[i,...,j], full_matrices=False) - U_var_sel_sc.append(U) - S_var_sel_sc.append(np.diag(S)) - V_var_sel_sc.append(V) - - XUds_= np.stack(U_var_sel_sc, axis=-1) - XSds_= np.stack(S_var_sel_sc, axis=-1) - 
XVds_= np.stack(V_var_sel_sc, axis=-1) - - XUds[i,...]=XUds_ - XSds[i,...]=XSds_ - XVds[i,...]=XVds_ - st.write("") - st.write('SVD Summary') - st.write('XUds: ',XUds.shape) - st.write('XSds: ',XSds.shape) - st.write('XVds: ',XVds.shape) - - else: - for i in range(np.size(Xr_sel,0)): - U_var_sel = [] - S_var_sel = [] - V_var_sel = [] - for j in range(nvar): - - U, S, V = np.linalg.svd(Xr_sel[i,...,j], full_matrices=False) - U_var_sel.append(U) - S_var_sel.append(np.diag(S)) - V_var_sel.append(V) - - XUds_= np.stack(U_var_sel, axis=-1) - XSds_= np.stack(S_var_sel, axis=-1) - XVds_= np.stack(V_var_sel, axis=-1) - - XUds[i,...]=XUds_ - XSds[i,...]=XSds_ - XVds[i,...]=XVds_ - st.write("") - st.write('SVD Summary') - st.write('XUds: ',XUds.shape) - st.write('XSds: ',XSds.shape) - st.write('XVds: ',XVds.shape) - - fig, ax = plt.subplots() - ax.set_title('Left singular vectors data distribution') - ax.hist(XUds.flatten(), bins='auto') - st.pyplot(fig) - - fig, ax = plt.subplots() - ax.set_title('Right singular vectors data distribution') - ax.hist(XVds.flatten(), bins='auto') - st.pyplot(fig) - - ind=np.linspace(0,nt-1,nt,dtype=int) - - if decision4=='shuffle': - np.random.shuffle(ind) - ind - - train_ind = ind[0:n_train] - test_ind = ind[n_train:] - - if 'Xr_sel_sc' in locals(): - Xr_train = Xr_sc[train_ind] - Xr_test = Xr_sc[test_ind] - else: - Xr_train = Xr[train_ind] - Xr_test = Xr[test_ind] - - XUds_train = XUds[train_ind] - XUds_test = XUds[test_ind] - XSds_train = XSds[train_ind] - XSds_test = XSds[test_ind] - XVds_train = XVds[train_ind] - XVds_test = XVds[test_ind] - - np.product(Xr_train.shape)+np.product(Xr_test.shape) - - if not os.path.exists(f"{path0}/{filename}/weights"): - os.makedirs(f"{path0}/{filename}/weights") - - file_name = f"{path0}/{filename}/weights/Interp_dense_NN_v1.0" - - save_best_weights = file_name + '_best.h5' - save_last_weights = file_name + '_last.h5' - save_summary_stats = file_name + '.csv' - - # Model inputs - in_U_dim = XUds.shape[1:] - in_S_dim = XSds.shape[1:] - in_V_dim = XVds.shape[1:] - out_dim = Xr_train.shape[1:] - - # Neural Network construction - - def create_model_1(in_U_dim, in_S_dim, in_V_dim, out_dim): - in_U = Input(shape=(*in_U_dim,),name='in_u') - in_S = Input(shape=(*in_S_dim,),name='in_s') - in_V = Input(shape=(*in_V_dim,),name='in_v') - - u = Flatten(name='u_1')(in_U) - u = Dense (neurons, activation=act_func, name='u_2')(u) - XUus = Dense(out_dim[0]*in_U_dim[1]*in_U_dim[2],activation=act_fun2,name='u_3')(u) - - v = Flatten(name='v_1')(in_V) - v = Dense (neurons, activation=act_func, name='v_2')(v) - XVus = Dense(in_V_dim[0]*out_dim[1]*in_V_dim[2],activation=act_fun2,name='v_3')(v) - - XUus_reshape = Reshape((out_dim[0],in_U_dim[1],in_U_dim[2]),name='reshape_u')(XUus) - XVus_reshape = Reshape((in_V_dim[0],out_dim[1],in_V_dim[2]),name='reshape_v')(XVus) - - X_hat = tf.einsum('ijkl,iknl,inpl->ijpl', XUus_reshape, in_S, XVus_reshape) - - m = Model(inputs=[in_U,in_S,in_V],outputs= X_hat) - m_upsampled_matrices = Model(inputs=[in_U,in_S,in_V],outputs= [XUus_reshape,XVus_reshape]) - - m.compile(loss=loss_function, optimizer=Adam(learn_rate), metrics=['mse']) - - return(m, m_upsampled_matrices) - - model, model_upsampled_matrices = create_model_1(in_U_dim, in_S_dim, in_V_dim, out_dim) - - t0 = time.time() - callbacks = [ModelCheckpoint(save_best_weights, monitor='val_loss', save_best_only=True, mode='auto'), - EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto', min_delta = 0.0001)] - - model.summary(print_fn=lambda x: 
st.text(x)) - - history = model.fit([XUds_train,XSds_train,XVds_train],Xr_train, - batch_size=bs, - epochs=epoch, - validation_split=0.15, - verbose=1, - shuffle=True, - initial_epoch = 0, - callbacks=callbacks) - - st.success('The model has been trained!') - - # Training stats - - summary_stats = pd.DataFrame({'epoch': [ i + 1 for i in history.epoch ], - 'train_acc': history.history['mse'], - 'valid_acc': history.history['val_mse'], - 'train_loss': history.history['loss'], - 'valid_loss': history.history['val_loss']}) - - summary_stats.to_csv(save_summary_stats) - - fig, ax = plt.subplots() - - ax.set_yscale("log") - ax.set_title('Training vs. Validation loss', fontsize = 14) - ax.plot(summary_stats.train_loss, 'b', label = 'Train loss') - ax.plot(summary_stats.valid_loss, 'g', label = 'Valid. loss') - ax.legend(loc = 'upper right') - st.pyplot(fig) - - fig, ax = plt.subplots() - - ax.set_title('Training vs. Validation accuracy', fontsize = 14) - ax.plot(summary_stats.train_acc, 'b', label = 'Train accuracy') - ax.plot(summary_stats.valid_acc, 'g', label = 'Valid. accuracy') - ax.legend(loc = 'upper right') - st.pyplot(fig) - - min_loss, idx = min((loss, idx) for (idx, loss) in enumerate(history.history['val_loss'])) - st.write('###### Minimum val_loss at epoch', '{:d}'.format(idx+1), '=', '{:.4f}'.format(min_loss)) - min_loss = round(min_loss, 4) - - # Load the best model epoch - - model.load_weights(save_best_weights) - - # Model prediction on training data - Xr_hat = model.predict([XUds_test, XSds_test, XVds_test]) - - if decision1=='scaling': - Xr_test = Xr[test_ind] - - for i in range(nvar): - Xr_hat[...,i] = descale_val(Xr_hat[...,i], var_min[i], var_max[i]) - else: - None - - if dims=='Data3D': - Xr_test=np.reshape(Xr_test,(nt-n_train,ny,nx,nz,nvar),order='F') - Xr_hat=np.reshape(Xr_hat,(nt-n_train,ny,nx,nz,nvar),order='F') - Xr_sel=np.reshape(Xr_sel,(nt,ny_sel,nx_sel,nz,nvar),order='F') - - - xg = np.linspace(0,nx,nx) - yg = np.linspace(0,ny,ny) - - x = np.linspace(0,nx,nx_sel) - y = np.linspace(0,ny,ny_sel) - xx, yy = np.meshgrid(x, y) - - # Saving results - if decision5 == 'yes': - if not os.path.exists(f"{path0}/{filename}/tables"): - os.makedirs(f"{path0}/{filename}/tables") - if not os.path.exists(f"{path0}/{filename}/videos"): - os.makedirs(f"{path0}/{filename}/videos") - - st.info(f'Plotting first 3 snapshots for all {nvar} variables') - for nv in range(nvar): - for nt in range(3): - if 'nz' in locals(): - fig, ax = plt.subplots() - n5 = int(Xr_test.shape[3] / 2) - fig.suptitle(f'XY plane - Component {nv+1} Snapshot {nt+1}', fontsize = 16) - ax.contourf(yg,xg,np.transpose(Xr_hat[nt,..., n5, nv], (1,0))) - ax.set_title('Predicted data - XY Plane', fontsize = 14) - ax.set_xlabel('X', fontsize = 12) - ax.set_ylabel('Y', fontsize = 12) - fig.tight_layout() - st.pyplot(fig) - - else: - fig, ax = plt.subplots() - fig.suptitle(f'Component {nv+1} Snapshot {nt+1}', fontsize = 16) - ax.contourf(yg,xg,np.transpose(Xr_hat[nt,...,nv], (1,0))) - ax.set_title('Predicted data', fontsize = 14) - ax.set_xlabel('X', fontsize = 12) - ax.set_ylabel('Y', fontsize = 12) - fig.tight_layout() - st.pyplot(fig) - - if decision5 == 'yes': - tabname1 = "tables/NN_table1" - tabname2 = "tables/NN_table2" - - if 'nz' in locals(): - Xr_test_p0=Xr_test[...,0,:] - Xr_hat_p0=Xr_hat[...,0,:] - Xr_sel_p0=Xr_sel[...,0,:] - - [results_table, results_table2] = error_tables(Xr_test_p0, Xr_hat_p0, Xr_sel_p0,tabname1,tabname2, path0, filename, nvar) - else: - [results_table, results_table2] = error_tables(Xr_test, 
Xr_hat, Xr_sel, tabname1, tabname2, path0, filename, nvar) - - if decision5 == 'yes': - st.write('Performance measures on Test data, for all measures') - st.table(results_table) - - - st.write('Performance measures on Test data, for one specific layer of measures') - st.table(results_table2) - - # Video creation - if decisionx == 'yes': - with st.spinner(f'Generating videos for all {nvar} variables. Video will be saved to {path0}/{filename}/videos. This may take some time'): - for var in range(nvar): - if 'nz' in locals (): - n5 = int(Xr_test.shape[3] / 2) - videoname=f"{path0}/{filename}/videos/var{var}_p{n5}.gif" - Xr_test_sel=Xr_test[...,n5,:] - Xr_hat_sel=Xr_hat[...,n5,:] - Xr_sel_sel=Xr_sel[...,n5,:] - videos_results(videoname, var, n_train, test_ind, xg, yg, x, y, Xr_test_sel, Xr_hat_sel, Xr_sel_sel) - else: - videoname=f"{path0}/{filename}/videos/var{var}.gif" - videos_results(videoname, var, n_train, test_ind, xg, yg, x, y, Xr_test, Xr_hat, Xr_sel) - st.success('All snapshots have been saved!') - - if decision7=='yes': - st.info('Plotting comparison of enhanced data, original data, and ground truth for first 3 snapshots') - for var in range(nvar): - os.makedirs(f"{path0}/{filename}/compare_plots", exist_ok=True) - os.makedirs(f"{path0}/{filename}/compare_plots/var{var}", exist_ok=True) - figname = f"{path0}/{filename}/compare_plots/var{var}/var{var}_scatter.pdf" - files3 = os.listdir(f'{path0}/{filename}/compare_plots') - - os.makedirs(f"{path0}/{filename}/compare_plots/var{var}",exist_ok=True) - - for n_snap in range(0, 3): - if 'nz' in locals(): - n5 = int(Xr_test.shape[3] / 2) - figname = f"{path0}/{filename}/compare_plots/var{var}/var{var}_snap{n_train+n_snap}_p{n5}.pdf" - - Xr_test_sel=Xr_test[...,n5,:] - Xr_hat_sel=Xr_hat[...,n5,:] - Xr_sel_sel=Xr_sel[...,n5,:] - figures_results_ylim(n_snap, var, yg, xg, y, x, Xr_test_sel, Xr_sel_sel, Xr_hat_sel, figname) - - else: - figname = f"{path0}/{filename}/compare_plots/var{var}/var{var}_{n_train+n_snap}.pdf" - figures_results_ylim(n_snap, var, yg, xg, y, x, Xr_test, Xr_sel, Xr_hat, figname) - - np.save(f'{path0}/{filename}/EnhancedData.npy', Xr_hat) - - st.info("Press 'Refresh' to run a new case") - Refresh = st.button('Refresh') - if Refresh: - st.stop() \ No newline at end of file diff --git a/v0.1_ModelFLOWs_web/DMDd.py b/v0.1_ModelFLOWs_web/DMDd.py deleted file mode 100644 index a46cb05..0000000 --- a/v0.1_ModelFLOWs_web/DMDd.py +++ /dev/null @@ -1,515 +0,0 @@ -import numpy as np -import numba as nb -import streamlit as st - -def matlen(var): - '''Equivalent to Matlab's length()''' - if np.size(np.shape(var))==1: - x = np.size(var) - else: - x = max(np.shape(var)) - return x - -def error(V,Vrec): - '''Relative RMS and max errors''' - return np.linalg.norm(V-Vrec)/np.linalg.norm(V),np.linalg.norm(V-Vrec,np.inf)/np.linalg.norm(V,np.inf) - -@nb.njit(cache=True) - -def truncatedSVD(A,esvd): - '''Decomposition into singular values, truncated on esvd''' - U,s,Wh=np.linalg.svd(A,full_matrices=False) - n=0 - norm=np.linalg.norm(s) - for i in range(s.size): - if np.linalg.norm(s[i:])/norm<=esvd: - break - else: - n+=1 - return U[:,:n],s[:n],Wh[:n,:] - -def unfold(A,dim): - '''Turns tensor into matrix keeping the columns on dim''' - ax=np.arange(A.ndim) - return np.reshape(np.moveaxis(A,ax,np.roll(ax,dim)),(A.shape[dim],A.size//A.shape[dim])) - -def fold(B,shape,dim): - '''Reverse operation to the unfold function''' - ax=np.arange(len(shape)) - shape=np.roll(shape,-dim) - A=np.reshape(B,shape) - return np.moveaxis(A,ax,np.roll(ax,-dim)) - 
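Note: the `unfold`/`fold` pair above implements the mode-`dim` matricization used throughout DMDd.py: `unfold` rotates axis `dim` to the front and reshapes the tensor into a matrix, and `fold` inverts that operation. A minimal round-trip sketch follows (illustrative only, not part of the deleted file; it assumes the two functions above are in scope and uses an arbitrary small tensor):

```python
import numpy as np

# Unfolding along any axis and folding back should recover the tensor.
A = np.random.rand(3, 4, 5)                       # arbitrary small tensor
for dim in range(A.ndim):
    M = unfold(A, dim)                            # rows indexed by axis `dim`
    assert M.shape == (A.shape[dim], A.size // A.shape[dim])
    assert np.allclose(fold(M, A.shape, dim), A)  # exact round trip
```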
-def tprod(S,U): - '''Tensor product of an ndim-array and multiple matrices''' - T = S - shap = list(np.shape(S)) - for i in range(0,np.size(U)): - x = np.count_nonzero(U[0][i]) - if not x==0: - shap[i] = np.shape(U[0][i])[0] - H = unfold(T,i) - T = fold(np.dot(U[0][i],H), shap, i) - return T - -def tensormatmul(S,U,dim): - '''Internal product of tensor and matrix in dim''' - shape=np.array(S.shape) - shape[dim]=U.shape[0] - return fold(U@unfold(S,dim),shape,dim) - -def truncHOSVD(A,esvd): - '''Decomposition into singular values for tensors, truncated in esvd''' - Ulist=[] - slist=[] - S=A.copy() - for i in range(A.ndim): - [U,s,Vh]=truncatedSVD(unfold(A,i),esvd/np.sqrt(A.ndim)) - Ulist.append(U) - S=tensormatmul(S,U.T,i) - for i in range(A.ndim): - s=np.zeros(S.shape[0]) - ax=np.arange(A.ndim) - for j in range(S.shape[0]): - s[j]=np.linalg.norm(S[j]) - slist.append(s) - S=np.moveaxis(S,ax,np.roll(ax,1)) - return S,Ulist,slist - -def dmd1(V,t,esvd,edmd): - '''First order dynamic modal decomposition: - Input: - -V (IxJ): Snapshot matrix. - -t (J): Time vector. - -esvd: First tolerance (SVD). - -edmd: Second tolerance (DMD modes). - Output: - -u (Ixn): mode matrix (columns) sorted from biggest to smallest amplitude, put to scale. - -areal (n): amplitude vector (sorted) used to put u to scale. - -eigval: eigenvalues - -delta (n): growth rate vector. - -omega (n): frequency vector. - -DMDmode: Modes''' - dt=t[1]-t[0] - - #Reduced snapshots: - U,s,Wh=truncatedSVD(V,esvd) #I*r', r'*r', r'*J -# Vvir=np.conj(U[:,:n].T)@V -# Vvir=np.diag(s[:n])@Wh[:n,:] #r'*J -# Vvir=s[:n]*Wh[:n,:] - Vvir=np.diag(s)@Wh #r'*J - n=s.size - - #Spatial complexity kk: - NormS=np.linalg.norm(s,ord=2) - kk=0 - for k in range(0,n): - if np.linalg.norm(s[k:n],2)/NormS>esvd: - kk=kk+1 - st.write(f'Spatial complexity: {kk}') - - #Koopman matrix reconstruction: - Uvir,svir,Wvirh=np.linalg.svd(Vvir[:,:-1],full_matrices=False) #r'*r', r'*r', r'*(J-1) - Rvir=Vvir[:,1:]@Wvirh.conj().T@np.diag(svir**-1)@Uvir.conj().T #r'*r' - eigval,eigvec=np.linalg.eig(Rvir) #r'*r' - - #Frequencies and Growthrate: - delta=np.log(eigval).real/dt #r' - omega=np.log(eigval).imag/dt #r' - - #Amplitudes: - A=np.zeros((eigvec.shape[0]*Vvir.shape[1],eigvec.shape[1])) #(r'*J)*r' - b=np.zeros(eigvec.shape[0]*Vvir.shape[1])#(r'*J)*1 - for i in range(Vvir.shape[1]): - A[i*eigvec.shape[0]:(i+1)*eigvec.shape[0],:]=eigvec@np.diag(eigval**i) - b[i*eigvec.shape[0]:(i+1)*eigvec.shape[0]]=Vvir[:,i] - - Ua,sa,Wa=np.linalg.svd(A,full_matrices=False) #(r'*J)*r', r'*r', r'*r' - a=Wa.conj().T@np.diag(sa**-1)@Ua.conj().T@b #r' - - #Modes: - uvir=eigvec@np.diag(a) #r'*r' -# u=U[:,:n]@uvir #I*r' - u=U@uvir #I*r' -# areal=np.zeros(a.size) #Real amplitudes -# for i in range(u.shape[1]): -# areal[i]=np.linalg.norm(u[:,i])/np.sqrt(V.shape[1]) - areal=np.linalg.norm(u,axis=0)/np.sqrt(V.shape[0]) - - #Spectral complexity: - kk3=0 - for m in range(0,np.size(areal)): - if areal[m]/np.max(areal)>edmd: - kk3=kk3+1 - st.write(f'Spectral complexity: {kk3}') - - idx=np.flip(np.argsort(areal)) - u=u[:,idx] - areal=areal[idx] - eigval=eigval[idx] - delta=delta[idx] - omega=omega[idx] - - #Filter important ones: - mask=(areal/areal[0])>edmd - - #Mode Matrix: - ModeMatr=np.zeros((kk3,4)) - for ii in range(0,kk3): - ModeMatr[ii,0]=ii+1 - ModeMatr[ii,1]=delta[mask][ii] - ModeMatr[ii,2]=omega[mask][ii] - ModeMatr[ii,3]=areal[mask][ii] - # print('Mode Number, GrowthRate, Frequency and Amplitude of each mode:') - # print(ModeMatr) - - #Calculate modes: - u=u[:,mask] - U = U[:,0:kk] - 
DMDmode=np.zeros((V.shape[0],kk3),dtype=np.complex128) - Amplitude0=np.zeros(kk3) - for m in range(0,kk3): - NormMode=np.linalg.norm(np.dot(U,u[:,m]),ord=2)/np.sqrt(V.shape[0]) - Amplitude0[m]=NormMode - DMDmode[:,m]=np.dot(U,u[:,m])/NormMode - - return u,areal[mask],eigval[mask],delta[mask],omega[mask],DMDmode - -#@nb.njit(cache=True) -def hodmd(V,d,t,esvd,edmd): - '''High order (d) modal decomposition: - Input: - -V (IxJ): snapshot matrix. - -t (J): time vector. - -d: parameter of DMD-d, higher order Koopman assumption (int>=1). - -esvd: first tolerance (SVD). - -edmd: second tolerance (DMD-d modes). - Output: - -u (Ixn): mode matrix (columns) sorted from biggest to smallest amplitude, put to scale. - -areal (n): amplitude vector (sorted) used to put u to scale. - -eigval: eigenvalues - -delta (n): growth rate vector. - -omega (n): frequency vector. - -DMDmode: Modes''' - dt=t[1]-t[0] - - #Reduced snapshots: - U,s,Wh=truncatedSVD(V,esvd) #I*n, n*n, n*J -# Vvir=np.conj(U[:,:n].T)@V - Vvir=np.diag(s)@Wh #n*J - #print("Tamaño Vvir: ",Vvir.shape) -# Vvir=s*Wh - n=s.size - - #Spatial complexity kk: - NormS=np.linalg.norm(s,ord=2) - kk=0 - for k in range(0,n): - if np.linalg.norm(s[k:n],2)/NormS>esvd: - kk=kk+1 - st.write(f'Spatial complexity: {kk}') - - #Reduced and grouped snapshots: - Vdot=np.zeros((d*n,Vvir.shape[1]-d+1),dtype=Vvir.dtype) #(d*n)*(J-d+1) - for j in range(d): - Vdot[j*n:(j+1)*n,:]=Vvir[:,j:Vvir.shape[1]-d+j+1] - #print("Size Vdot: ",Vdot.shape) - - #Reduced, grouped and again reduced snapshots: - Udot,sdot,Whdot=truncatedSVD(Vdot,esvd) #(d*n)*N, N*N, N*(J-d+1) - Vvd=np.diag(sdot)@Whdot #N*(J-d+1) - #print("Size Vvd: ",Vvd.shape) - - #Spatial dimension reduction: - st.write(f'Spatial dimension reduction: {np.size(sdot)}') - - #Koopman matrix reconstruction: - Uvd,svd,Whvd=np.linalg.svd(Vvd[:,:-1],full_matrices=False) #N*r', r'*r', r'*(J-d) - Rvd=Vvd[:,1:]@Whvd.conj().T@np.diag(svd**-1)@Uvd.conj().T #r'*r' - eigval,eigvec=np.linalg.eig(Rvd) #r'*r'==N*N - #print("Size Rvd: ",Rvd.shape) - - #Frequencies and growthrate: - delta=np.log(eigval).real/dt #r' - omega=np.log(eigval).imag/dt #r' - - #Modes - q=(Udot@eigvec)[(d-1)*n:d*n,:] #Taking steps back n*N - Uvir=q/np.linalg.norm(q,axis=0) #n*N - #print("Size Uvir: ",Uvir.shape) - - #Amplitudes: - A=np.zeros((Uvir.shape[0]*Vvir.shape[1],Uvir.shape[1]),dtype=np.complex128) #(n*J)*N - #print("Size A: ",A.shape) - b=np.zeros(Uvir.shape[0]*Vvir.shape[1],dtype=Vvir.dtype)#(n*J)*1 - for i in range(Vvir.shape[1]): - A[i*Uvir.shape[0]:(i+1)*Uvir.shape[0],:]=Uvir@np.diag(eigval**i) - b[i*Uvir.shape[0]:(i+1)*Uvir.shape[0]]=Vvir[:,i] -# print(A[:Uvir.shape[0],:]) - Ua,sa,Wa=np.linalg.svd(A,full_matrices=False) #(n*J)*N, N*N, N*N - a=Wa.conj().T@np.diag(sa**-1)@Ua.conj().T@b #N - -# print(eigval) - #Modes - uvir=Uvir@np.diag(a) #n*N - u=U@uvir #I*N - areal=np.linalg.norm(u,axis=0)/np.sqrt(V.shape[0]) - #print("Size ufull: ",u.shape) - - #Spectral complexity: - kk3=0 - for m in range(0,np.size(areal)): - if areal[m]/np.max(areal)>edmd: - kk3=kk3+1 - st.write(f'Spectral complexity: {kk3}') - - idx=np.flip(np.argsort(areal)) - u=u[:,idx] - areal=areal[idx] - eigval=eigval[idx] - delta=delta[idx] - omega=omega[idx] - - #Filter important ones: - mask=(areal/areal[0])>edmd - - #Mode Matrix: - ModeMatr=np.zeros((kk3,4)) - for ii in range(0,kk3): - ModeMatr[ii,0]=ii+1 - ModeMatr[ii,1]=delta[mask][ii] - ModeMatr[ii,2]=omega[mask][ii] - ModeMatr[ii,3]=areal[mask][ii] - # print('Mode Number, GrowthRate, Frequency and Amplitude of each mode:') - # 
print(ModeMatr) - - #Calculate DMD modes: - u=u[:,mask] - U = U[:,0:kk] - DMDmode=np.zeros((V.shape[0],kk3),dtype=np.complex128) - Amplitude0=np.zeros(kk3) - for m in range(0,kk3): - NormMode=np.linalg.norm(np.dot(U.T,u[:,m]),ord=2)/np.sqrt(V.shape[0]) - Amplitude0[m]=NormMode - DMDmode[:,m]=u[:,m]/NormMode - - return u,areal[mask],eigval[mask],delta[mask],omega[mask],DMDmode - -def tensorHODMD(V,d,t,esvd,edmd): - '''High order (d) modal decomposition for tensors: - Input: - -V (I1xI2x...xJ): Snapshots. - -t (J): Time vector. - -d: parameter of DMD-d, higher order Koopman assumption (int>=1). - -esvd: first tolerance (SVD). - -edmd: second tolerance (DMD-d modes). - Output: - -u (I1xI2x...xn): mode tensor (columns) sorted from biggest to smallest amplitude, put to scale. - -areal (n): amplitude vector (sorted) used to put u to scale. - -eigval: eigenvalues - -delta (n): growth rate vector. - -omega (n): frequency vector. - -DMDmode: Modes''' - - #Reduced snapshots: - S,Vl,sl=truncHOSVD(V,esvd) #I*n, n*n, n*J - Svir=S.copy() - for i in range(S.ndim-1): - Svir=tensormatmul(Svir,Vl[i],i) - Svir=Svir/sl[-1] -# Vl[-1]=(sl[-1]*Vl[-1]).T - - uvir,a,eigval,delta,omega,DMDmode=hodmd((sl[-1]*Vl[-1]).T,d,t,esvd,edmd) - u=tensormatmul(Svir,uvir.T,V.ndim-1) - - return u,a,eigval,delta,omega,DMDmode - -def remake(u,t,mu): - '''Reconstructs original data from DMD-d results: - Input: - -u (Ixn): Mode matrix (columns). - -t (J): Time vector. - -delta (n): growth rate vector. - -omega (n): frequency vector. - -mu: np.exp(np.dot((t[1]-t[0]),delta[iii]+np.dot(complex(0,1),omega[iii]))) - Output: - -vrec (IxJ): reconstructed snapshots''' - - vrec=np.zeros((u.shape[0],t.size),dtype=np.complex128) - for i in range(t.size): - for j in range(u.shape[1]): - vrec[:,i]+=u[:,j]*mu[j]**i#*np.exp((delta[j]+omega[j]*1j)*t[i]) - return vrec - -#@nb.njit(cache=True) -def remakeTens(u,t,mu): - '''Reconstructs original data from DMD-d results: - Input: - -u (Ixn): Mode matrix (columns). - -t (J): Time vector. - -delta (n): growth rate vector. - -omega (n): frequency vector. - -mu: np.exp(np.dot((t[1]-t[0]),delta[iii]+np.dot(complex(0,1),omega[iii]))) - Output: - -vrec (IxJ): reconstructed snapshots''' - - shape=np.array(u.shape,dtype=np.int32) - shape[-1]=t.size - vrec=np.zeros(tuple(shape),dtype=np.complex128) - idx=[slice(None)]*shape.size - for i in range(t.size): - for j in range(u.shape[-1]): - idx[-1]=i - vrec[tuple(idx)]+=u.take(j,axis=-1)*mu[j]**i#*np.exp((delta[j]+omega[j]*1j)*t[i]) - return vrec - -def hodmd_IT(Vvir,d,t,esvd,edmd): - '''High order (d) modal decomposition: - Input: - -Vvir (IxJ): reduced snapshot matrix. - -t (J): time vector. - -d: parameter of DMD-d, higher order Koopman assumption (int>=1). - -esvd: first tolerance (SVD). - -edmd: second tolerance (DMD-d modes). - Output: - -u (Ixn): mode matrix (columns) sorted from biggest to smallest amplitude, put to scale. - -areal (n): amplitude vector (sorted) used to put u to scale. - -eigval: eigenvalues - -delta (n): growth rate vector. - -omega (n): frequency vector.
- -DMDmode: Modes''' - dt=t[1]-t[0] - N=Vvir.shape[0] - K=Vvir.shape[1] - - #Reduced and grouped snapshots: - Vdot=np.zeros((d*N,Vvir.shape[1]-d+1),dtype=Vvir.dtype) #(d*n)*(J-d+1) - for j in range(d): - Vdot[j*N:(j+1)*N,:]=Vvir[:,j:Vvir.shape[1]-d+j+1] - #print("Size Vdot: ",Vdot.shape) - - #Reduced, grouped and again reduced snapshots: - Udot,sdot,Whdot=truncatedSVD(Vdot,esvd) #(d*n)*N, N*N, N*(J-d+1) - Vvd=np.diag(sdot)@Whdot #N*(J-d+1) - #print("Size Vvd: ",Vvd.shape) - - #Spatial dimension reduction: - st.write(f'Spatial dimension reduction: {np.size(sdot)}') - - #Koopman matrix reconstruction: - Uvd,svd,Whvd=np.linalg.svd(Vvd[:,:-1],full_matrices=False) #N*r', r'*r', r'*(J-d) - Rvd=Vvd[:,1:]@Whvd.conj().T@np.diag(svd**-1)@Uvd.conj().T #r'*r' - eigval,eigvec=np.linalg.eig(Rvd) #r'*r'==N*N - #print("Size Rvd: ",Rvd.shape) - - #Frequencies and growthrate: - delta=np.log(eigval).real/dt #r' - omega=np.log(eigval).imag/dt #r' - - #Modes - q=(Udot@eigvec)[(d-1)*N:d*N,:] #Taking steps back n*N - Uvir=q/np.linalg.norm(q,axis=0) #n*N - #print("Size Uvir: ",Uvir.shape) - - #Amplitudes: - A=np.zeros((Uvir.shape[0]*Vvir.shape[1],Uvir.shape[1]),dtype=np.complex128) #(n*J)*N - #print("Size A: ",A.shape) - b=np.zeros(Uvir.shape[0]*Vvir.shape[1],dtype=Vvir.dtype)#(n*J)*1 - for i in range(Vvir.shape[1]): - A[i*Uvir.shape[0]:(i+1)*Uvir.shape[0],:]=Uvir@np.diag(eigval**i) - b[i*Uvir.shape[0]:(i+1)*Uvir.shape[0]]=Vvir[:,i] -# print(A[:Uvir.shape[0],:]) - Ua,sa,Wa=np.linalg.svd(A,full_matrices=False) #(n*J)*N, N*N, N*N - a=Wa.conj().T@np.diag(sa**-1)@Ua.conj().T@b #N - -# print(eigval) - #Modes - u=np.zeros((np.shape(Uvir)[0],np.size(eigval)),dtype=np.complex128) - for m in range(0,np.size(eigval)): - u[:,m]=np.dot(a[m],Uvir[:,m]) - #uvir=Uvir@np.diag(a) #n*N - #u=U@uvir #I*N - #areal=np.linalg.norm(u,axis=0)/np.sqrt(V.shape[0]) - #print("Size ufull: ",u.shape) - areal=np.zeros(np.size(eigval),dtype=np.complex128) - for mm in range(0,np.size(eigval)): - GR=delta[mm] - AmplGR=np.exp(np.dot(GR,t)) - AmplN=np.linalg.norm(AmplGR,ord=2)/np.sqrt(K) - areal[mm]=np.dot(np.linalg.norm(u[:,mm],ord=2),AmplN) - areal=np.real(areal) - - idx=np.flip(np.argsort(areal,axis=0)) - u=u[:,idx] - areal=areal[idx] - eigval=eigval[idx] - delta=delta[idx] - omega=omega[idx] - - #Spectral complexity: - kk3=0 - for m in range(0,np.size(areal)): - if areal[m]/np.max(areal)>edmd: - kk3=kk3+1 - st.write(f'Spectral complexity: {kk3}') - - return u[:,0:kk3],areal[0:kk3],eigval[0:kk3],delta[0:kk3],omega[0:kk3] - -def remakeTens_IT(t,t0,u,delta,omega): - '''Reconstructs original data from DMD-d results: - Input: - -u (Ixn): Mode matrix (columns). - -t (J): Time vector. - -delta (n): growth rate vector. - -omega (n): frequency vector.
- -mu: np.exp(np.dot((t[1]-t[0]),delta[iii]+np.dot(complex(0,1),omega[iii]))) - Output: - -vrec (IxJ): reconstructed snapshots''' - - dt=t-t0 - icomp=complex(0,1) - mu=np.zeros(np.size(delta),dtype=np.complex128) - for iii in range(0,np.size(delta)): - mu[iii] = np.exp(np.dot(dt,delta[iii]+np.dot(icomp,omega[iii]))) - return np.dot(u,mu) - -def reconst_IT(hatMode,Time,U,S,sv,nn,TimePos,GrowthRate,Frequency): - '''Reconstructs original data from DMD-d results.''' - N = np.shape(hatMode)[0] - K = matlen(Time) - hatTReconst = np.zeros((N,K), dtype=np.complex128) - - # Reconstruction using the DMD expansion: - for k in range(0,K): - hatTReconst[:,k] = remakeTens_IT(Time[k],Time[0],hatMode,GrowthRate,Frequency) - - # Reconstruction of the original tensor using the reduced tensor and the tensor core: - Unondim = U - print(f'hatTReconst size: {np.shape(hatTReconst)}') - UTnondim = np.zeros(shape=np.shape(U), dtype=object) - UTnondim[0][TimePos-1] = np.zeros((nn[TimePos-1],np.shape(hatTReconst)[1]),dtype=np.complex128) - for kk in range(0,nn[TimePos-1]): - UTnondim[0][TimePos-1][kk,:] = hatTReconst[kk,:]/sv[0][TimePos-1][kk] - - Unondim[0][TimePos-1] = np.transpose(UTnondim[0][TimePos-1]) - TensorReconst = tprod(S,Unondim) - return TensorReconst - -def modes_IT(N,hatMode,Amplitude,U,S,nn,TimePos): - '''Calculate DMD modes from results''' - hatMode_m = np.zeros((N,np.size(Amplitude)), dtype=hatMode.dtype) - for ii in range(0,np.size(Amplitude)): - hatMode_m[:,ii] = hatMode[:,ii]/Amplitude[ii] - - ModesT = np.zeros((nn[TimePos-1],np.shape(hatMode_m)[1]), dtype=hatMode_m.dtype) - for kk in range(0,nn[TimePos-1]): - ModesT[kk,:] = hatMode_m[kk,:] - - # Temporal DMD modes in reduced dimension: - Modes = U - Modes[0,TimePos-1] = np.transpose(ModesT) - - # Reconstruction of the temporal DMD modes: - DMDmode = tprod(S,Modes) - - return DMDmode \ No newline at end of file diff --git a/v0.1_ModelFLOWs_web/DS_30_Tensor.pkl b/v0.1_ModelFLOWs_web/DS_30_Tensor.pkl deleted file mode 100644 index 65b6a78..0000000 --- a/v0.1_ModelFLOWs_web/DS_30_Tensor.pkl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2d0b362ac33fe61734cc579a2dce9469bc3e001490362c3fa75d13fb2660275f -size 36864168 diff --git a/v0.1_ModelFLOWs_web/DS_30_Tensor_cylinder_Re100.mat b/v0.1_ModelFLOWs_web/DS_30_Tensor_cylinder_Re100.mat deleted file mode 100644 index 65d9b24..0000000 Binary files a/v0.1_ModelFLOWs_web/DS_30_Tensor_cylinder_Re100.mat and /dev/null differ diff --git a/v0.1_ModelFLOWs_web/FullDL_page.py b/v0.1_ModelFLOWs_web/FullDL_page.py deleted file mode 100644 index e317bf2..0000000 --- a/v0.1_ModelFLOWs_web/FullDL_page.py +++ /dev/null @@ -1,923 +0,0 @@ -import streamlit as st -import numpy as np -import pandas as pd -import matplotlib.pyplot as plt -import time -import math -import scipy.io -import sys, os -import data_fetch - -from sklearn.metrics import mean_squared_error, mean_absolute_error -from sklearn.metrics import median_absolute_error, r2_score - -# Remove logs from tensorflow about CUDA -import logging, os -logging.disable(logging.WARNING) -os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" - -import tensorflow as tf -from tensorflow import keras - -from tensorflow.keras.layers import Dense, Reshape, BatchNormalization -from tensorflow.keras.layers import TimeDistributed, LSTM, Flatten, Conv3D -from tensorflow.keras.layers import MaxPooling3D, Permute, Input -from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping - -from tensorflow.keras.models import Model - 
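Note: the `load_preprocess` routine that follows builds rolling-window samples from a `(channels, dim_x, dim_y, time)` tensor: each sample uses `k` consecutive snapshots as predictors and the next `p` snapshots as targets, as in the `DataGenerator.__data_generation` methods below. A minimal sketch of that indexing (illustrative only, not part of the deleted file; the shapes are arbitrary, while `k = 10` and `p = 2` match the values set later in this file):

```python
import numpy as np

# One rolling-window sample, mirroring DataGenerator.__data_generation.
k, p = 10, 2                              # predictor / target window lengths
data = np.random.rand(2, 35, 50, 100)     # (channels, dim_x, dim_y, time)
ID = 0                                    # start index of this window
X = data[:, :, :, ID:ID + k]              # k snapshots in
y = data[:, :, :, ID + k:ID + k + p]      # next p snapshots out
assert X.shape == (2, 35, 50, k) and y.shape == (2, 35, 50, p)
```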
-################################################################################ -# Load Data and Preprocess -################################################################################ - -def load_preprocess(tensor_train: np.ndarray, tensor_test: np.ndarray, - train_size: float = 0.75, val_size: float = 0.25, - batch_size: int = 8, model_type = 'rnn'): - - # Use to raise errors - flag = {'check': True, - 'text': None} - outputs = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] - outputs[0] = flag - - if (train_size + val_size != 1.0): - - flag = {'check': False, - 'text': "Error: Params 'train_size', 'val_size' " + - "must add up to 1, e.g., train_size = 0.8, " + - "val_size = 0.2"} - outputs[0] = flag - - return outputs - - if (tensor_train.ndim != 4 or tensor_test.ndim != 4): - - flag = {'check': False, - 'text': "Error: Params 'tensor_train' and 'tensor_test' have to have a " + - "number of dimensions equal to 4, and the temporal dimension has to " + - "be the last one"} - outputs[0] = flag - - return outputs - - if (model_type == 'cnn'): - - min_val = np.amin(tensor_train) - range_val = np.ptp(tensor_train) - - elif (model_type == 'rnn'): - - min_val = 0 - range_val = 1 - - else: - - flag = {'check': False, - 'text': "Error: Param 'model_type' has to be a string equal to " + - "'cnn' or 'rnn'"} - outputs[0] = flag - - return outputs - - - tensor_train_norm = (tensor_train - min_val)/range_val - tensor_test_norm = (tensor_test - min_val)/range_val - - # Dataset configuration - total_length = tensor_train_norm.shape[3] - channels_n = tensor_train_norm.shape[0] - dim_x = tensor_train_norm.shape[1] - dim_y = tensor_train_norm.shape[2] - - """ - Data Generator (Rolling Window) - """ - if (model_type == 'rnn'): - class DataGenerator(tf.keras.utils.Sequence): - 'Generates data for Keras' - def __init__(self, data, list_IDs, batch_size=5, dim=(2,35,50), k = 624, - p = 1, shuffle=True, till_end = False, only_test = False): - 'Initialization' - self.data = data - self.dim = dim - self.batch_size = batch_size - self.list_IDs = list_IDs - self.shuffle = shuffle - self.p = p - self.k = k - self.till_end = till_end - self.only_test = only_test - self.on_epoch_end() - - def __len__(self): - 'Denotes the number of batches per epoch' - if self.till_end: - lenx = math.ceil((len(self.list_IDs) / self.batch_size)) - else: - lenx = int(np.floor(len(self.list_IDs) / self.batch_size)) - return lenx - - def __getitem__(self, index): - 'Generate one batch of data' - # Generate indexes of the batch - indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size] - - # Find list of IDs - list_IDs_temp = [self.list_IDs[k] for k in indexes] - - # Generate data - X, y = self.__data_generation(list_IDs_temp) - if self.only_test: - return X - else: - return X, y - - def on_epoch_end(self): - 'Updates indexes after each epoch' - self.indexes = np.arange(len(self.list_IDs)) - if self.shuffle == True: - np.random.shuffle(self.indexes) - - def __data_generation(self, list_IDs_temp): - 'Generates data containing batch_size samples' # X : (n_samples, *dim, depth) - # Initialization - X = np.empty((self.batch_size, *self.dim, self.k)) - y = [np.empty((self.batch_size, *self.dim))]*self.p - - y_inter = np.empty((self.batch_size, *self.dim, p)) - - # Generate data - lenn = len(list_IDs_temp) - for i, ID in enumerate(list_IDs_temp): - # Store Xtrain - X[i,:,:,:,:] = self.data[:,:,:,ID:ID+k] - # Store Ytrain - y_inter[i,:,:,:,:] = self.data[:,:,:,ID+k:ID+k+p] - - for j in range(self.p): - y[j] = y_inter[:,:,:,:,j] - y[j] =
np.reshape(y[j], (lenn, -1)) - - X = X.transpose((0,4,2,3,1)) - X = np.reshape(X, (X.shape[0],X.shape[1],-1)) - - return X, y - - elif (model_type == 'cnn'): - - class DataGenerator(tf.keras.utils.Sequence): - 'Generates data for Keras' - # IMPORTANT: In Synthetic jet: 1 piston cycle=624 snapshots---SET k=624 - def __init__(self, data, list_IDs, batch_size=5, dim=(2,35,50), - k = 624, p = 1, - shuffle=True, till_end = False, only_test = False): - 'Initialization' - self.data = data - self.dim = dim - self.batch_size = batch_size - self.list_IDs = list_IDs - self.shuffle = shuffle - self.p = p - self.k = k - self.till_end = till_end - self.only_test = only_test - self.on_epoch_end() - - def __len__(self): - 'Denotes the number of batches per epoch' - if self.till_end: - lenx = math.ceil((len(self.list_IDs) / self.batch_size)) - else: - lenx = int(np.floor(len(self.list_IDs) / self.batch_size)) - return lenx - - def __getitem__(self, index): - 'Generate one batch of data' - # Generate indexes of the batch - indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size] - - # Find list of IDs - list_IDs_temp = [self.list_IDs[k] for k in indexes] - - # Generate data - X, y = self.__data_generation(list_IDs_temp) - if self.only_test: - return X - else: - return X, y - - def on_epoch_end(self): - 'Updates indexes after each epoch' - self.indexes = np.arange(len(self.list_IDs)) - if self.shuffle == True: - np.random.shuffle(self.indexes) - - def __data_generation(self, list_IDs_temp): - 'Generates data containing batch_size samples' # X : (n_samples, *dim, depth) - # Initialization - X = np.empty((self.batch_size, *self.dim, self.k)) - y = [np.empty((self.batch_size, *self.dim))]*self.p - - y_inter = np.empty((self.batch_size, *self.dim, p)) - - # Generate data - lenn = len(list_IDs_temp) - for i, ID in enumerate(list_IDs_temp): - # Store Xtrain - X[i,:,:,:,:] = self.data[:,:,:,ID:ID+k] - # Store Ytrain - y_inter[i,:,:,:,:] = self.data[:,:,:,ID+k:ID+k+p] - - for j in range(self.p): - y[j] = y_inter[:,:,:,:,j] - y[j] = np.reshape(y[j], (lenn, -1)) - - X = X.transpose((0,4,2,3,1)) - - return X, y - - - """ - Create training, validation and test sets - """ - - # Prepare the dataset indexes - period_transitorio = 0 - stride_train = 1 - stride_val = 1 - stride_test = 1 - - dim=(channels_n, dim_x, dim_y) - - k = 10 # number of snapshots used as predictors - p = 2 # number of snapshots used as time-ahead predictions - - test_length = tensor_test.shape[-1] - val_length = int(val_size * total_length) - train_length = int(train_size * total_length) - - if(batch_size < 0 or - not isinstance(batch_size, int) or - batch_size > np.min([val_length,train_length])-k-1): - - flag = {'check': False, - 'text': "Error: Param 'batch_size' has to be an integer " + - "number greater than 0 and, in this case, lower than or equal to " + - f"{np.min([val_length,train_length])-k-1}"} - outputs[0] = flag - - return outputs - - if int(train_length-period_transitorio-(k+p)) < 0: - train_n = 0 - elif int((train_length-period_transitorio-(k+p))//stride_train) == 0: - train_n = 1 - else: - train_n = int(((train_length-period_transitorio)-(k+p))//stride_train) + 1 - - if int(test_length-period_transitorio-(k+p)) < 0: - test_n = 0 - elif int((test_length-period_transitorio-(k+p))//stride_test) == 0: - test_n = 1 - else: - test_n = int((test_length-period_transitorio-(k+p))//stride_test) + 1 - - if int(val_length-(k+p)) < 0: - val_n = 0 - elif int((val_length-(k+p))//stride_val) == 0: - val_n = 1 - else: - val_n = 
int((val_length-(k+p))//stride_val) + 1 - - # Indices for the beginnings of each batch - train_idxs = np.empty([train_n], dtype='int') - val_idxs = np.empty([val_n], dtype='int') - test_idxs = np.empty([test_n], dtype='int') - - j = period_transitorio - for i in range(train_n): - train_idxs[i] = j - j = j+stride_train - - j = train_length - for i in range(val_n): - val_idxs[i] = j - j = j+stride_val - - j = 0 - for i in range(test_n): - test_idxs[i] = j - j = j+stride_test - - # Generators - training_generator = DataGenerator(tensor_train_norm, train_idxs, - dim = dim, - batch_size = batch_size, - k = k, p = p, till_end = False, - only_test = False, - shuffle = True) - validation_generator = DataGenerator(tensor_train_norm, val_idxs, - dim = dim, - batch_size = batch_size, - k = k, p = p, till_end = False, - only_test = False, - shuffle = False) - - test_generator = DataGenerator(tensor_test_norm, test_idxs, - dim = dim, - batch_size = batch_size, - k = k, p = p, till_end = False, - only_test = True, - shuffle = False) - """ - print ('test_length: ', test_length) - print ('val_length: ', val_length) - print ('train_length: ', train_length) - print() - print ('test_n: ', test_n) - print ('val_n: ', val_n) - print ('train_n: ', train_n) - print() - print('test_generator_len: ', len(test_generator)) - print('validation_generator_len: ', len(validation_generator)) - print('training_generator_len: ', len(training_generator)) - """ - # prepare Ytest - test_n_adjusted = int(test_n/batch_size)*batch_size - Ytest = [np.empty([test_n_adjusted, channels_n, dim_x, dim_y], dtype='float64')] * p - Ytest_fl = [np.empty([test_n_adjusted, channels_n * dim_x * dim_y ], dtype='float64')] * p - - Ytest_inter = np.empty([test_n_adjusted, channels_n, dim_x, dim_y, p], dtype='float64') - - for i in range(test_n_adjusted): - j = test_idxs[i] - Ytest_inter[i,:,:,:,:] = tensor_test_norm[:,:,:,j+k:j+k+p] - - for r in range(p): - Ytest[r] = Ytest_inter[:,:,:,:,r] - Ytest_fl[r] = np.copy(np.reshape(Ytest[r], (test_n_adjusted, -1))) - - outputs = [ - flag, training_generator, validation_generator, test_generator, tensor_test, - tensor_test_norm, min_val, range_val, dim_x, dim_y, channels_n, Ytest, - Ytest_fl, k, p - ] - - return outputs - -################################################################################ -# RNN Model -################################################################################ - -def lstm_model(neurons, lr, shared_dim, act_fun, act_fun2, num_epochs, k, p, dim_x, dim_y, channels_n, training_generator, - validation_generator, path_saved: str = './'): - - flag = {'check': True, - 'text': None} - - if (num_epochs <= 0 or not isinstance(num_epochs, int)): - - flag = {'check': False, - 'text': "Error: Param 'num_epochs' has to be an integer " + - "number greater than 0, e.g., num_epochs = 70"} - - return [flag, 0, 0] - - if (not isinstance(path_saved, str)): - - flag = {'check': False, - 'text': "Error: Param 'path_saved' has to be a string " + - "indicating a path to save the files, e.g., path_saved = './'"} - - return [flag, 0, 0] - - # Reset Model - tf.keras.backend.clear_session() - - # Create Model - def create_model(neurons, lr, in_shape, out_dim, shared_dim, act_fun, act_fun2, p = 3): - neurons = int(neurons) - x = Input(shape=in_shape) - - v = LSTM(neurons)(x) - v = Dense(p*neurons, activation= act_fun)(v) - v = Reshape((p,neurons))(v) - - tt = [1]*p - - r = TimeDistributed( Dense(shared_dim, activation=act_fun))(v) - s = tf.split(r, tt, 1) - for i in range(p): - s[i] = Flatten()(s[i])
-
-        o = []
-        for i in range(p):
-            o.append(Dense(out_dim, activation=act_fun2)(s[i]))
-
-        m = Model(inputs=x, outputs=o)
-
-        opt = keras.optimizers.Adam(learning_rate=lr)
-        m.compile(loss='mse', optimizer=opt, metrics=['mae'])
-        return(m)
-
-    in_shape = [k, dim_x * dim_y * channels_n]
-    out_dim = dim_x * dim_y * channels_n
-
-    model = create_model(neurons, lr, in_shape, out_dim, shared_dim, act_fun, act_fun2, p)
-
-    save_string = path_saved + 'lstm_model'
-
-    # Output file names: best weights, training stats, last weights, metrics
-    save_best_weights = save_string + '.h5'
-    save_summary_stats = save_string + '.csv'
-    save_last_weights = save_string + '_last_w.h5'
-    save_results_metrics = save_string + '_results_metrics.csv'
-
-    """
-    Training
-    """
-
-    print("\n########################")
-    print("TRAINING - RNN Model")
-    print("########################\n")
-
-    np.random.seed(247531338)
-
-    t0 = time.time()
-    # Model training
-    callbacks = [ModelCheckpoint(save_best_weights,
-                                 monitor='val_loss',
-                                 save_best_only=True,
-                                 mode='auto'),
-                 EarlyStopping(monitor='val_loss',
-                               patience=10,
-                               verbose=1,
-                               mode='auto',
-                               min_delta = 0.001)]
-
-    model.summary(print_fn=lambda x: st.text(x))
-
-    history = model.fit(training_generator,
-                        validation_data=validation_generator,
-                        epochs=num_epochs,
-                        verbose=2,
-                        callbacks=callbacks)
-    t1 = time.time()
-    print("Minutes elapsed for training: %f" % ((t1 - t0) / 60.))
-
-    """
-    Save Model
-    """
-
-    # save the last weights
-    model.save_weights(save_last_weights)
-
-    # Aggregate the summary statistics
-    summary_stats = pd.DataFrame({'epoch': [i + 1 for i in history.epoch],
-                                  'train_loss': history.history['loss'],
-                                  'valid_loss': history.history['val_loss']})
-
-    summary_stats.to_csv(save_summary_stats)
-    fig, ax = plt.subplots()
-    ax.plot(summary_stats.train_loss, 'b', label = 'Training loss')     # blue
-    ax.plot(summary_stats.valid_loss, 'g--', label = 'Validation loss') # green
-    ax.grid()
-    ax.legend()
-    figName = path_saved + "loss_evolution_lstm_model.svg"
-    plt.savefig(figName, format = 'svg')
-    st.pyplot(fig)
-
-    # Find the min validation loss during the training
-    # min_loss, idx = min((loss, idx) for (idx, loss) in enumerate(history.history['val_loss']))
-    # print('Minimum val_loss at epoch', '{:d}'.format(idx+1), '=', '{:.4f}'.format(min_loss))
-
-    return flag, model, save_best_weights
-
-################################################################################
-# CNN Model
-################################################################################
-
-def cnn_model(shared_dim, lr, act_fun, act_fun2, num_epochs, k, p, dim_x, dim_y, channels_n, training_generator,
-              validation_generator, path_saved: str = './'):
-
-    flag = {'check': True,
-            'text': None}
-
-    if (num_epochs <= 0 or not isinstance(num_epochs, int)):
-
-        flag = {'check': False,
-                'text': "Error: Param 'num_epochs' has to be an integer " +
-                        "number greater than 0, e.g., num_epochs = 70"}
-
-        return [flag, 0, 0]
-
-    if (not isinstance(path_saved, str)):
-
-        flag = {'check': False,
-                'text': "Error: Param 'path_saved' has to be a string " +
-                        "indicating a path to save the files, e.g., path_saved = './'"}
-
-        return [flag, 0, 0]
-
-    # Reset model state (clear_session must be called)
-    tf.keras.backend.clear_session()
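# Descriptive note on the 3D CNN defined below: three stages of
# Conv3D -> MaxPooling3D -> BatchNormalization encode the k input snapshots,
# a final Conv3D with p filters plus Permute/Reshape turns the feature
# volume into p feature rows (one per forecast horizon), and, mirroring the
# LSTM model above, a shared TimeDistributed Dense layer feeds p separate
# output heads.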
-
-    # Create Model
-    def create_model(in_shape, out_dim, lr, p = 3, shared_dim = shared_dim, act_fun = act_fun, act_fun2 = act_fun2):
-        x = Input(shape=in_shape)
-
-        v = Conv3D(5,
-                   kernel_size=(2,2,2),
-                   activation=act_fun,
-                   input_shape=in_shape,
-                   data_format='channels_last')(x)
-        v = MaxPooling3D(pool_size=(1,2,2),
-                         padding='valid',
-                         data_format='channels_last', dtype='float32')(v)
-        v = BatchNormalization()(v)
-        v = Conv3D(10,
-                   kernel_size=(2,2,2),
-                   activation=act_fun,
-                   data_format='channels_last')(v)
-        v = MaxPooling3D(pool_size=(1,2,2),
-                         padding='valid',
-                         data_format='channels_last', dtype='float32')(v)
-        v = BatchNormalization()(v)
-        v = Conv3D(20,
-                   kernel_size=(2,2,2),
-                   activation=act_fun,
-                   data_format='channels_last')(v)
-        v = MaxPooling3D(pool_size=(1,2,2),
-                         padding='valid',
-                         data_format='channels_last', dtype='float32')(v)
-        v = BatchNormalization()(v)
-        v = Conv3D(p,
-                   kernel_size=(1,1,1),
-                   activation=act_fun,
-                   data_format='channels_last')(v)
-        v = Permute((4,1,2,3))(v)
-        v = Reshape((p, v.shape[2]*v.shape[3]*v.shape[4]))(v)
-
-        tt = [1]*p
-
-        r = TimeDistributed(Dense(shared_dim, activation=act_fun))(v)
-        s = tf.split(r, tt, 1)
-        for i in range(p):
-            s[i] = Flatten()(s[i])
-
-        o = []
-        for i in range(p):
-            o.append(Dense(out_dim, activation=act_fun2)(s[i]))
-
-        m = Model(inputs=x, outputs=o)
-        optimizer = tf.keras.optimizers.Adam(learning_rate = lr)
-        m.compile(loss='mse', optimizer=optimizer, metrics=['mse'])
-        return(m)
-
-    in_shape = [k, dim_x, dim_y, channels_n]
-    out_dim = dim_x * dim_y * channels_n
-
-    # Define model
-    model = create_model(in_shape, out_dim, lr, p, shared_dim, act_fun, act_fun2)
-
-    save_string = path_saved + 'cnn_model'
-
-    # Output file names: best weights, training stats, last weights, metrics
-    save_best_weights = save_string + '.h5'
-    save_summary_stats = save_string + '.csv'
-    save_last_weights = save_string + '_last_w.h5'
-    save_results_metrics = save_string + '_results_metrics.csv'
-
-    """
-    Training
-    """
-
-    print("\n########################")
-    print("TRAINING - CNN Model")
-    print("########################\n")
-
-    np.random.seed(247531338)
-
-    t0 = time.time()
-    # Model training
-    callbacks = [ModelCheckpoint(save_best_weights,
-                                 monitor='val_loss',
-                                 save_best_only=True,
-                                 mode='auto')]
-
-    model.summary(print_fn=lambda x: st.text(x))
-
-    history = model.fit(training_generator,
-                        validation_data=validation_generator,
-                        epochs=num_epochs,
-                        verbose=2,
-                        callbacks=callbacks)
-    t1 = time.time()
-    print("Minutes elapsed for training: %f" % ((t1 - t0) / 60.))
-
-    """
-    Save Model
-    """
-
-    # save the last weights
-    model.save_weights(save_last_weights)
-
-    # Aggregate the summary statistics
-    summary_stats = pd.DataFrame({'epoch': [i + 1 for i in history.epoch],
-                                  'train_loss': history.history['loss'],
-                                  'valid_loss': history.history['val_loss']})
-
-    summary_stats.to_csv(save_summary_stats)
-
-    fig, ax = plt.subplots()
-    ax.plot(summary_stats.train_loss, 'b', label = 'Training loss')     # blue
-    ax.plot(summary_stats.valid_loss, 'g--', label = 'Validation loss') # green
-    ax.grid()
-    ax.legend()
-    figName = path_saved + "loss_evolution_cnn_model.svg"
-    plt.savefig(figName, format = 'svg')
-    st.pyplot(fig)
-
-    # Find the min validation loss during the training
-    # min_loss, idx = min((loss, idx) for (idx, loss) in enumerate(history.history['val_loss']))
-    # print('Minimum val_loss at epoch', '{:d}'.format(idx+1), '=', '{:.4f}'.format(min_loss))
-
-    return flag, model, save_best_weights
-
-################################################################################
-# Inference
-################################################################################
-
-def RRMSE(Tensor0, Reconst):
-    RRMSE = np.linalg.norm(np.reshape(Tensor0-Reconst, newshape=(np.size(Tensor0),1)), ord=2) / np.linalg.norm(np.reshape(Tensor0, newshape=(np.size(Tensor0),1)))
-    return(RRMSE)
-
-def smape(A, F):
-    # eps in the denominator guards against division by zero
-    return ((100.0/len(A)) *
-            np.sum(2 * np.abs(F - A) / (np.abs(A) + np.abs(F) + np.finfo(float).eps)))
-
-def inference(model, save_best_weights: str, test_generator,
-              Ytest_fl: np.ndarray, Ytest: np.ndarray, min_val: float,
-              range_val: float, p: int, path_saved: str = './',
-              model_type = 'rnn'):
-
-    print("\n###################")
-    print("INFERENCE")
-    print("###################\n")
-
-    t0 = time.time()
-    # Load the best weights' configuration obtained during training
-    model.load_weights(save_best_weights)
-    Ytest_hat_fl = model.predict(
-        test_generator,
-        max_queue_size=10,
-        workers=6,
-        use_multiprocessing=False,
-        verbose=0)
-
-    t1 = time.time()
-    print("Minutes elapsed to forecast: %f" % ((t1 - t0) / 60.))
-
-    # print('Error measure of the first prediction for each sample on Test set')
-    lag = 0
-    num_sec = Ytest_hat_fl[0].shape[0]
-    results_table = pd.DataFrame(index=['MSE','MAE','MAD','R2','SMAPE','RRMSE'], columns=range(num_sec))
-    for i in range(num_sec):
-        results_table.iloc[0,i] = mean_squared_error(Ytest_fl[lag][i,:], Ytest_hat_fl[lag][i,:])
-        results_table.iloc[1,i] = mean_absolute_error(Ytest_fl[lag][i,:], Ytest_hat_fl[lag][i,:])
-        results_table.iloc[2,i] = median_absolute_error(Ytest_fl[lag][i,:], Ytest_hat_fl[lag][i,:])
-        results_table.iloc[3,i] = r2_score(Ytest_fl[lag][i,:], Ytest_hat_fl[lag][i,:])
-        results_table.iloc[4,i] = smape(Ytest_fl[lag][i,:], Ytest_hat_fl[lag][i,:])
-        results_table.iloc[5,i] = RRMSE(np.reshape(Ytest_fl[lag][i,:],(-1,1)), np.reshape(Ytest_hat_fl[lag][i,:],(-1,1)))
-
-    # print(results_table)
-    savename = path_saved + "table_" + model_type + "_first_prediction.csv"
-    results_table.to_csv(savename, index=True)
-
-    # print('Error measure of the second prediction for each sample on Test set')
-    lag = 1
-    num_sec = Ytest_hat_fl[0].shape[0]
-    results_table = pd.DataFrame(index=['MSE','MAE','MAD','R2','SMAPE','RRMSE'], columns=range(num_sec))
-    for i in range(num_sec):
-        results_table.iloc[0,i] = mean_squared_error(Ytest_fl[lag][i,:], Ytest_hat_fl[lag][i,:])
-        results_table.iloc[1,i] = mean_absolute_error(Ytest_fl[lag][i,:], Ytest_hat_fl[lag][i,:])
-        results_table.iloc[2,i] = median_absolute_error(Ytest_fl[lag][i,:], Ytest_hat_fl[lag][i,:])
-        results_table.iloc[3,i] = r2_score(Ytest_fl[lag][i,:], Ytest_hat_fl[lag][i,:])
-        results_table.iloc[4,i] = smape(Ytest_fl[lag][i,:], Ytest_hat_fl[lag][i,:])
-        results_table.iloc[5,i] = RRMSE(np.reshape(Ytest_fl[lag][i,:],(-1,1)), np.reshape(Ytest_hat_fl[lag][i,:],(-1,1)))
-
-    # print(results_table)
-    savename = path_saved + "table_" + model_type + "_second_prediction.csv"
-    results_table.to_csv(savename, index=True)
-
-    print('Performance measures on Test data, for all time, per time-ahead lag')
-    results_table_global = pd.DataFrame(index=['MSE','MAE','MAD','R2','SMAPE','RRMSE'], columns=range(p))
-    for i in range(p):
-        results_table_global.iloc[0,i] = mean_squared_error(Ytest_fl[i].flatten(), Ytest_hat_fl[i].flatten())
-        results_table_global.iloc[1,i] = mean_absolute_error(Ytest_fl[i].flatten(), Ytest_hat_fl[i].flatten())
-        results_table_global.iloc[2,i] = median_absolute_error(Ytest_fl[i].flatten(), Ytest_hat_fl[i].flatten())
-        results_table_global.iloc[3,i] = r2_score(Ytest_fl[i].flatten(), Ytest_hat_fl[i].flatten())
-        results_table_global.iloc[4,i] = smape(Ytest_fl[i].flatten(), Ytest_hat_fl[i].flatten())
-        results_table_global.iloc[5,i] = RRMSE(np.reshape(Ytest_fl[i].flatten(),(-1,1)), np.reshape(Ytest_hat_fl[i].flatten(),(-1,1)))
-
-    results_table_global['mean'] = results_table_global.mean(axis=1)
-
-    print(results_table_global)
-    savename = path_saved + "table_" + model_type + "_mean.csv"
-    results_table_global.to_csv(savename, index=True)
-
-    # Reshape predictions and targets for plotting
-    Ytest_hat_lag_0 = np.reshape(
-        Ytest_hat_fl[0],
-        (Ytest[0].shape[0], Ytest[0].shape[1],
-         Ytest[0].shape[2], Ytest[0].shape[3]))
-
-    Ytest_hat_lag_1 = np.reshape(
-        Ytest_hat_fl[1],
-        (Ytest[1].shape[0], Ytest[1].shape[1],
-         Ytest[1].shape[2], Ytest[1].shape[3]))
-
-    Ytest_lag_0 = np.reshape(
-        Ytest_fl[0],
-        (Ytest[0].shape[0], Ytest[0].shape[1],
-         Ytest[0].shape[2], Ytest[0].shape[3]))
-
-    Ytest_lag_1 = np.reshape(
-        Ytest_fl[1],
-        (Ytest[1].shape[0], Ytest[1].shape[1],
-         Ytest[1].shape[2], Ytest[1].shape[3]))
-
-    # Undo the min-max normalisation before plotting
-    Ytest_hat_lag_0 = Ytest_hat_lag_0 * range_val + min_val
-    Ytest_hat_lag_1 = Ytest_hat_lag_1 * range_val + min_val
-    Ytest_lag_0 = Ytest_lag_0 * range_val + min_val
-    Ytest_lag_1 = Ytest_lag_1 * range_val + min_val
-
-    return [Ytest_hat_lag_0, Ytest_hat_lag_1, Ytest_lag_0, Ytest_lag_1]
-
-def plot_results(Ytest_hat_lag_0: np.ndarray, Ytest_hat_lag_1: np.ndarray,
-                 Ytest_lag_0: np.ndarray, Ytest_lag_1: np.ndarray, index: int,
-                 path_saved: str = './', model_type = 'rnn'):
-
-    flag = {'check': True,
-            'text': None}
-
-    if (index < 0 or index >= Ytest_lag_0.shape[0] or not isinstance(index, int)):
-
-        flag = {'check': False,
-                'text': "Error: Param 'index' has to be a non-negative integer " +
-                        f"number lower than or equal to {Ytest_lag_0.shape[0] - 1}, " +
-                        "e.g., index = 0"}
-
-        return flag
-
-    fig = plt.figure(figsize=(15,7))
-    plt.subplot(2,3,1)
-    plt.contourf(Ytest_lag_0[index,0,:,:], 10)
-    plt.title(f"Ground Truth - Sample {2*index+1} of test set", fontsize = 14)
-    plt.subplot(2,3,2)
-    plt.contourf(Ytest_hat_lag_0[index,0,:,:], 10)
-    plt.title(f"Prediction - Sample {2*index+1} of test set", fontsize = 14)
-    plt.subplot(2,3,3)
-    plt.contourf(np.abs(Ytest_hat_lag_0[index,0,:,:] - Ytest_lag_0[index,0,:,:]), 10)
-    plt.title(f"Absolute Error - Sample {2*index+1} of test set", fontsize = 14)
-    plt.colorbar()
-
-    plt.subplot(2,3,4)
-    plt.contourf(Ytest_lag_1[index,0,:,:], 10)
-    plt.title(f"Ground Truth - Sample {2*index+2} of test set", fontsize = 14)
-    plt.subplot(2,3,5)
-    plt.contourf(Ytest_hat_lag_1[index,0,:,:], 10)
-    plt.title(f"Prediction - Sample {2*index+2} of test set", fontsize = 14)
-    plt.subplot(2,3,6)
-    plt.contourf(np.abs(Ytest_hat_lag_1[index,0,:,:] - Ytest_lag_1[index,0,:,:]), 10)
-    plt.title(f"Absolute Error - Sample {2*index+2} of test set", fontsize = 14)
-    plt.colorbar()
-
-    fig.tight_layout()
-
-    figName = path_saved + "predictions_" + model_type + f"_model_sample_{2*index+1}.svg"
-    plt.savefig(figName, format = 'svg')
-    #plt.show()
-    st.pyplot(fig)
-
-    return flag
-
-################################################################################
-# Module Main
-################################################################################
-
-def menu(model_type):
-    if model_type == 'cnn':
-        st.title("CNN Model")
-        st.write("""
-This full deep learning model uses a Convolutional Neural Network (CNN) architecture to predict future snapshots (forecasting).
-""")
-        st.write(" ## CNN Model - Parameter Configuration")
-
-    if model_type == 'rnn':
-        st.title("RNN Model")
-        st.write("""
-This full deep learning model uses a Recurrent Neural Network (RNN) architecture to predict future snapshots (forecasting).
-""")
-        st.write(" ## RNN Model - Parameter Configuration")
-
-    path0 = os.getcwd()
-
-    wantedfile = 'Tensor_cylinder_Re100.mat'
-
-    Ten_orig = data_fetch.fetch_data(path0, wantedfile)
-
-    # Hyperparameters
-    train_size = 0.75
-
-    tensor_train = Ten_orig[...,:int(train_size*Ten_orig.shape[-1])]
-    tensor_test = Ten_orig[...,int(train_size*Ten_orig.shape[-1]):]
-
-    val_size = 0.25
-
-    model_type_ = model_type.upper()
-
-    batch_size = st.slider('Select Batch Size', 0, 8, value = 4, step = 2)
-
-    if model_type == 'cnn':
-
-        num_epochs = st.slider('Select training epochs', 0, 100, value = 3, step = 1)
-        act_fun = st.selectbox('Select hidden layer activation function', ('relu', 'elu', 'sigmoid', 'softmax', 'tanh'))
-        act_fun2 = st.selectbox('Select output layer activation function', ('linear', 'tanh', 'relu', 'sigmoid'))
-        shared_dim = st.slider('Select number of shared dims', 1, 100, value = 30, step = 1)
-        lr = st.number_input('Select the learning rate', min_value = 0., max_value = 0.1, value = 0.01, format = '%.4f')
-
-    elif model_type == 'rnn':
-        num_epochs = st.slider('Select training epochs', 0, 200, value = 20, step = 10)
-        act_fun = st.selectbox('Select hidden layer activation function', ('linear', 'relu', 'elu', 'sigmoid', 'softmax', 'tanh'))
-        act_fun2 = st.selectbox('Select output layer activation function', ('linear', 'tanh', 'relu', 'sigmoid'))
-        neurons = st.slider('Select number of neurons per layer', 1, 150, value = 50, step = 1)
-        shared_dim = st.slider('Select number of shared dims', 1, 150, value = 50, step = 1)
-        lr = st.number_input('Select the learning rate', min_value = 0., max_value = 0.1, value = 0.01, format = '%.4f')
-
-    path_saved = f'{path0}/{model_type_}_solution/'
-
-    # Main
-    go = st.button('Calculate')
-    if go:
-        with st.spinner("Please wait while the model is being trained"):
-            if not os.path.exists(f'{path0}/{model_type_}_solution'):
-                os.mkdir(f'{path0}/{model_type_}_solution')
-            flag, training_generator, validation_generator, test_generator, tensor_test, \
-            tensor_test_norm, min_val, range_val, dim_x, dim_y, channels_n, Ytest, \
-            Ytest_fl, k, p = load_preprocess(tensor_train, tensor_test,
-                                             train_size = train_size, val_size = val_size,
-                                             batch_size = batch_size, model_type = model_type)
-
-            if (not flag['check']):
-                print(flag['text'])
-
-            else:
-                if (model_type == 'rnn'):
-
-                    flag, model, save_best_weights = lstm_model(neurons, lr, shared_dim, act_fun, act_fun2, num_epochs, k, p,
-                                                                dim_x, dim_y, channels_n, training_generator,
-                                                                validation_generator, path_saved)
-                    st.success('The model has been trained!')
-                    st.write("### Model Training Results - RNN")
-
-                else:
-
-                    flag, model, save_best_weights = cnn_model(shared_dim, lr, act_fun, act_fun2, num_epochs, k, p,
-                                                               dim_x, dim_y, channels_n, training_generator,
-                                                               validation_generator, path_saved)
-                    st.success('The model has been trained!')
-                    st.write("### Model Training Results - 3D CNN")
-
-                if (not flag['check']):
-
-                    print(flag['text'])
-
-                else:
-                    Ytest_hat_lag_0, Ytest_hat_lag_1, \
-                    Ytest_lag_0, Ytest_lag_1 = inference(model,
-                        save_best_weights, test_generator, Ytest_fl, Ytest,
-                        min_val, range_val, p, path_saved, model_type)
-
-                    # Plot results
-                    for checkPoint in range(3):
-                        index = int(checkPoint)
-                        flag = plot_results(Ytest_hat_lag_0, 
Ytest_hat_lag_1, - Ytest_lag_0, Ytest_lag_1, index, - path_saved, model_type) - - if (not flag['check']): - print(flag['text']) - - checkPoint = 1 \ No newline at end of file diff --git a/v0.1_ModelFLOWs_web/Gappy_Tensor_cylinder_Re100.mat b/v0.1_ModelFLOWs_web/Gappy_Tensor_cylinder_Re100.mat deleted file mode 100644 index 51b884c..0000000 Binary files a/v0.1_ModelFLOWs_web/Gappy_Tensor_cylinder_Re100.mat and /dev/null differ diff --git a/v0.1_ModelFLOWs_web/IMPORTANT b/v0.1_ModelFLOWs_web/IMPORTANT deleted file mode 100644 index 8570103..0000000 --- a/v0.1_ModelFLOWs_web/IMPORTANT +++ /dev/null @@ -1,5 +0,0 @@ -If you haven't cloned this GitHub repository and instead downloaded the ZIP file, please download the ModelFLOWs-app web-browser version databases from: - -https://drive.google.com/drive/folders/1v-BU5kT8arbDrgGndZir-Et0K4MIrDs1?usp=sharing - -Once the download is complete, replace the files in the v_ModelFLOWs_web folder diff --git a/v0.1_ModelFLOWs_web/ModelFLOWs_web.py b/v0.1_ModelFLOWs_web/ModelFLOWs_web.py deleted file mode 100644 index c7da5e4..0000000 --- a/v0.1_ModelFLOWs_web/ModelFLOWs_web.py +++ /dev/null @@ -1,247 +0,0 @@ -import streamlit as st -from PIL import Image -from streamlit_option_menu import option_menu - - -# Load Pages -import SVD_page -import hodmd_page -import hosvd_page -import gappy_page -import autoencoders_page -import hybRNN_model_page -import hybCNN_model_page -import hodmd_pred_page -import DLsuperresolution_page -import FullDL_page -import hdf5storage -import os -import tensorflow as tf -import pickle -path0 = os.getcwd() - -# Page configuration -st.set_page_config(page_title='ModelFLOWs-app', page_icon = 'thumbnail.png', layout = 'wide', initial_sidebar_state = 'auto') - - -# Sidebar menu layout -with st.sidebar: - selected = option_menu("ModelFLOWs-app", ["About", 'Models'], - icons=['info-square', 'diagram-3'], menu_icon="house", default_index=0) - -if selected == 'About': - col1, col2, col3 = st.columns(3) - image = Image.open('modelflows.jpeg') - col2.image(image, width=200) - st.title('About the ModelFLOWs application') - st.write('This application has been developed by the ModelFLOWs research group') - - st.write(""" -ModelFLOWs is a research group whose main promoter and tutor is Soledad Le Clainche and was formed at the School of Aeronautics Engineering at Universidad Politécnica de Madrid (UPM). -This team uses different data-driven methods, i.e. reduced order models (ROMs) or neural networks (NN), to generate, study and predict databases related to complex flows (turbulent, reactive, etc.). - """) - - st.write(""" -This web-browser version of ModelFLOWs-app is a demo, used to show the structure of the application and how each algorithm works. The datasets used are 'Tensor_cylinder_Re100.mat', -a 2-Dimensional CFD simulation of flow passing a cylinder at Reynolds 100, and Tensor.pkl, a 3-Dimensional CFD simulation. 
- """) - selection = 'Tensor_cylinder_Re100.mat' - Tensor_ = hdf5storage.loadmat(f'{path0}/{selection}') - data = list(Tensor_.values())[-1] - st.write(f""" -The Tensor_cylinder_Re100.mat has the following shape: - -nv, ny, nx, nt: {data.shape}, where: - """) - st.markdown(""" -- nv is the number of velocity components, also known as variables -- nx is the number of points in the Y axis that form the mesh -- ny is the number of points in the X axis that form the mesh -- nt is the number of temporal components, also known as snapshots - """) - - st.write('Here is a brief example of what this data looks like') - - import matplotlib.pyplot as plt - fig, ax = plt.subplots() - plt.suptitle('Dataset: Tensor_cylinder_Re100.mat') - ax.contourf(data[0, :, :, 0]) - ax.set_title('First velocity component of the first snapshot') - st.pyplot(fig) - - fig, ax = plt.subplots() - plt.suptitle('Dataset: Tensor_cylinder_Re100.mat') - ax.contourf(data[1, :, :, 0]) - ax.set_title('Second velocity component of the first snapshot') - st.pyplot(fig) - - st.write("Other variants of this database are also provided such as:") - st.markdown(''' -- DS_30_Tensor_cylinder_Re100.mat, a downscaled version of the database, which means that the spatial mesh is smaller and, therefore, contains less data -- Gappy_Tensor_cylinder_Re100.mat, a version with the same shape as the original database, but with missing data which is replaced with NaN (Not A Number) values - ''') - - st.write('Here is what these databases look like') - - selection = 'DS_30_Tensor_cylinder_Re100.mat' - Tensor_ = hdf5storage.loadmat(f'{path0}/{selection}') - ds = list(Tensor_.values())[-1] - - selection = 'Gappy_Tensor_cylinder_Re100.mat' - Tensor_ = hdf5storage.loadmat(f'{path0}/{selection}') - gappy = list(Tensor_.values())[-1] - - fig, ax = plt.subplots() - plt.suptitle('Dataset: DS_30_Tensor_cylinder_Re100.mat') - ax.contourf(ds[0, :, :, 0]) - ax.set_title('First velocity component of the first snapshot') - st.pyplot(fig) - - fig, ax = plt.subplots() - plt.suptitle('Dataset: DS_30_Tensor_cylinder_Re100.mat') - ax.contourf(ds[1, :, :, 0]) - ax.set_title('Second velocity component of the first snapshot') - st.pyplot(fig) - - fig, ax = plt.subplots() - plt.suptitle('Dataset: Gappy_Tensor_cylinder_Re100.mat') - ax.contourf(gappy[0, :, :, 0]) - ax.set_title('First velocity component of the first snapshot') - st.pyplot(fig) - - fig, ax = plt.subplots() - plt.suptitle('Dataset: Gappy_Tensor_cylinder_Re100.mat') - ax.contourf(gappy[1, :, :, 0]) - ax.set_title('Second velocity component of the first snapshot') - st.pyplot(fig) - - with open(f'{path0}/Tensor.pkl', 'rb') as file: - Tensor=pickle.load(file) - - Tensor = tf.transpose(Tensor, (0, 2, 1, 3, 4)) - - st.write(f''' -The Tensor.pkl database is used exclusively in the Deep Learning Reconstruction module. 
This tensor has the following shape: - -nv, nx, ny, nz, nt: {Tensor.shape}, where: - ''') - - st.markdown(""" -- nv is the number of velocity components, also known as variables -- nx is the number of points in the Y axis that form the mesh -- ny is the number of points in the X axis that form the mesh -- nz is the number of points in the Z axis that form the mesh -- nt is the number of temporal components, also known as snapshots - """) - - st.write('Here is a brief example of what this data looks like') - - fig, ax = plt.subplots() - plt.suptitle('Dataset: Tensor.pkl') - ax.contourf(Tensor[0, :, :, 0, 0]) - ax.set_title('First velocity component of the first snapshot - XY plane') - st.pyplot(fig) - - fig, ax = plt.subplots() - plt.suptitle('Dataset: Tensor.pkl') - ax.contourf(Tensor[1, :, :, 0, 0]) - ax.set_title('Second velocity component of the first snapshot - XY plane') - st.pyplot(fig) - - fig, ax = plt.subplots() - plt.suptitle('Dataset: Tensor.pkl') - ax.contourf(Tensor[2, :, :, 0, 0]) - ax.set_title('Third velocity component of the first snapshot - XY plane') - st.pyplot(fig) - - st.write('''A variant to this database is also provided. In this case, a downscaled version: DS_30_Tensor.pkl''') - - st.write('Here is what this database looks like') - - with open(f'{path0}/DS_30_Tensor.pkl', 'rb') as file: - Tensords=pickle.load(file) - - Tensords = tf.transpose(Tensords, (0, 2, 1, 3, 4)) - - fig, ax = plt.subplots() - plt.suptitle('Dataset: DS_30_Tensor.pkl') - ax.contourf(Tensords[0, :, :, 0, 0]) - ax.set_title('First velocity component of the first snapshot - XY plane') - st.pyplot(fig) - - fig, ax = plt.subplots() - plt.suptitle('Dataset: DS_30_Tensor.pkl') - ax.contourf(Tensords[1, :, :, 0, 0]) - ax.set_title('Second velocity component of the first snapshot - XY plane') - st.pyplot(fig) - - fig, ax = plt.subplots() - plt.suptitle('Dataset: DS_30_Tensor.pkl') - ax.contourf(Tensords[2, :, :, 0, 0]) - ax.set_title('Third velocity component of the first snapshot - XY plane') - st.pyplot(fig) - - st.write("##### Check out the application tutorials at https://modelflows.github.io/modelflowsapp/tutorials/") - -if selected == 'Models': - # Sidebar with options - page = st.sidebar.selectbox("Select an option", ("Modal Decomposition", "Deep Learning")) - if page == 'Modal Decomposition': - action = st.sidebar.selectbox("Select an action", ("Pattern detection", "Reconstruction", "Prediction")) - if action == "Pattern detection": - option = st.sidebar.radio("Select an algorithm", ("SVD", "HOSVD", "HODMD")) - - elif action == "Reconstruction": - option = "Gappy SVD" - - elif action == "Prediction": - option = 'pred' - - if page == 'Deep Learning': - mdl = st.sidebar.selectbox("Select an option", ("Pattern detection", "Reconstruction", "Prediction")) - - if mdl == "Prediction": - option = st.sidebar.selectbox("Select a model type", ("Full DL model", "Hybrid DL Model")) - if option == 'Full DL model': - model_type = st.sidebar.radio('Select an architecture', ('CNN', 'RNN')) - model_type = model_type.lower() - elif option == 'Hybrid DL Model': - model_type1 = st.sidebar.radio('Select an architecture', ('CNN', 'RNN')) - - if model_type1 == 'RNN': - hybRNN_model_page.menu() - - if model_type1 == 'CNN': - hybCNN_model_page.menu() - - elif mdl == "Pattern detection": - option = "Autoencoder DNN Model" - - elif mdl == "Reconstruction": - option = "DNN Reconstruction Model" - - # Load the selected algorithm or model menu - if option == "SVD": - SVD_page.menu() - - if option == "HODMD": - 
hodmd_page.menu() - - if option == "HOSVD": - hosvd_page.menu() - - if option == "Gappy SVD": - gappy_page.menu() - - if option == "Autoencoder DNN Model": - autoencoders_page.menu() - - if option == "DNN Reconstruction Model": - DLsuperresolution_page.menu() - - if option == 'Full DL model': - FullDL_page.menu(model_type) - - if option == 'pred': - hodmd_pred_page.menu() - diff --git a/v0.1_ModelFLOWs_web/Requirements.txt b/v0.1_ModelFLOWs_web/Requirements.txt deleted file mode 100644 index 9eb8301..0000000 --- a/v0.1_ModelFLOWs_web/Requirements.txt +++ /dev/null @@ -1,10 +0,0 @@ -tensorflow==2.10 -streamlit==1.17 -scikit-learn==1.2 -ffmpeg -hdf5storage -numba==0.56.4 -torch==1.13.1 -scipy==1.9.3 -streamlit-option-menu==0.3.2 -keras-tuner diff --git a/v0.1_ModelFLOWs_web/SVD_page.py b/v0.1_ModelFLOWs_web/SVD_page.py deleted file mode 100644 index 7628f4f..0000000 --- a/v0.1_ModelFLOWs_web/SVD_page.py +++ /dev/null @@ -1,103 +0,0 @@ -import numpy as np -import os -import matplotlib.pyplot as plt -import streamlit as st -import data_fetch - -def svdtrunc(A): - U, S, V = np.linalg.svd(A, full_matrices = False) - return U, S, V - -def menu(): - st.title("Singular Value Decomposition, SVD") - st.write("""# -SVD (Singular Value Decomposition) is a mathematical technique used to analyze and reduce the dimensionality of databases. -It decomposes a given database into a set of orthogonal modes, capturing the most important patterns and variability in the data. -This allows for efficient storage, compression, and analysis of the information, enabling tasks such as visualization, model reduction, and optimization. - """) - st.write(" ## SVD - Parameter Configuration") - path0 = os.getcwd() - - selected_file = 'Tensor_cylinder_Re100.mat' - Tensor = data_fetch.fetch_data(path0, selected_file) - - n_modes = st.number_input('Select number of modes to retain during SVD', min_value = 0, max_value = None, value = 18, step = 1) - - go = st.button('Calculate') - if go: - with st.spinner('Please wait until the run is complete'): - shape = Tensor.shape - if not os.path.exists(f'{path0}/SVD_solution'): - os.mkdir(f'{path0}/SVD_solution') - if not os.path.exists(f'{path0}/SVD_solution/n_modes_{n_modes}'): - os.mkdir(f'{path0}/SVD_solution/n_modes_{n_modes}') - if not os.path.exists(f'{path0}/SVD_solution/n_modes_{n_modes}/svd_modes'): - os.mkdir(f'{path0}/SVD_solution/n_modes_{n_modes}/svd_modes') - - dims_prod = np.prod(shape[:-1]) - Tensor0 = np.reshape(Tensor, (dims_prod, shape[-1])) - - U, S, V = svdtrunc(Tensor0) - - S = np.diag(S) - - U = U[:,0:n_modes] - S = S[0:n_modes,0:n_modes] - V = V[0:n_modes,:] - - Reconst = (U @ S) @ V - - svd_modes = np.dot(U, S) - - fig, ax = plt.subplots(num = 'CLOSE TO CONTINUE RUN - SVD modes') - for i in range(S.shape[0]): - ax.plot(i+1, S[i][i] / S[0][0], "k*") - - ax.set_yscale('log') # Logarithmic scale in y axis - ax.set_xlabel('SVD modes') - ax.set_ylabel('Singular values') - ax.set_title('SVD modes vs. 
Singular values') - plt.savefig(f'{path0}/SVD_solution/svd_modes_plot.png', bbox_inches='tight') - st.pyplot(fig) - - newshape = [] - newshape.append(shape[:-1]) - newshape.append(svd_modes.shape[-1]) - newshape = list(newshape[0]) + [newshape[1]] - svd_modes = np.reshape(svd_modes, np.array(newshape)) - Reconst = np.reshape(Reconst, shape) - - RRMSE = np.linalg.norm(np.reshape(Tensor-Reconst,newshape=(np.size(Tensor),1)),ord=2)/np.linalg.norm(np.reshape(Tensor,newshape=(np.size(Tensor),1))) - st.write(f'\n###### Relative mean square error made during reconstruction: {np.round(RRMSE*100, 3)}%\n') - - st.info(f'All SVD mode plots will be saved to {path0}/SVD_solution/n_modes_{n_modes}/svd_modes') - st.info(f'Showing first 3 SVD modes for {svd_modes.shape[0]} components') - - for ModComp in range(svd_modes.shape[0]): - for ModeNum in range(svd_modes.shape[-1]): - fig, ax = plt.subplots() - ax.contourf(svd_modes[ModComp,:,:,ModeNum].real) - ax.set_title(f'SVD modes - Component {ModComp+1} Mode Number {ModeNum+1}', fontsize = 14) - ax.set_xlabel('X', fontsize = 10) - ax.set_ylabel('Y', fontsize = 10) - ax.axis('off') - fig.tight_layout() - plt.savefig(f'{path0}/SVD_solution/n_modes_{n_modes}/svd_modes/Comp_{ModComp+1}_svdmode_{ModeNum+1}.png') - if ModeNum <= 2: - st.pyplot(fig) - - - st.success('Run complete!') - - np.save(f'{path0}/SVD_solution/n_modes_{n_modes}/svd_modes.npy', svd_modes) - np.save(f'{path0}/SVD_solution/n_modes_{n_modes}/Reconst.npy', Reconst) - - st.warning(f"All files have been saved to {path0}/SVD_solution/n_modes_{n_modes}") - - st.info("Press 'Refresh' to run a new case") - Refresh = st.button('Refresh') - if Refresh: - st.stop() - - - diff --git a/v0.1_ModelFLOWs_web/Tensor.pkl b/v0.1_ModelFLOWs_web/Tensor.pkl deleted file mode 100644 index 63b86f7..0000000 --- a/v0.1_ModelFLOWs_web/Tensor.pkl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e80d679ca903c1e6ca42724e53d5da8a8d2c371558b294c79722a2bbce102ece -size 921600168 diff --git a/v0.1_ModelFLOWs_web/Tensor_cylinder_Re100.mat b/v0.1_ModelFLOWs_web/Tensor_cylinder_Re100.mat deleted file mode 100644 index 2518b7a..0000000 --- a/v0.1_ModelFLOWs_web/Tensor_cylinder_Re100.mat +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2e2b53b83fc2478fef9ee1168e459c274641c35502fde7d5de672fedbef2384e -size 199373261 diff --git a/v0.1_ModelFLOWs_web/autoencoders_page.py b/v0.1_ModelFLOWs_web/autoencoders_page.py deleted file mode 100644 index 0df0444..0000000 --- a/v0.1_ModelFLOWs_web/autoencoders_page.py +++ /dev/null @@ -1,368 +0,0 @@ -import streamlit as st -import os -import numpy as np -import pandas as pd -pd.set_option('display.max_columns',100) -pd.set_option('display.max_rows',100) -import math - -import os -import data_fetch - -# Load scikit-learn -from sklearn.model_selection import train_test_split - -# Load TensorFlow -import tensorflow as tf -from tensorflow import keras -from tensorflow.keras.models import Model -from tensorflow.keras.layers import Input -from tensorflow.keras.layers import Dense - -import matplotlib.animation as animation -import matplotlib.pyplot as plt -import streamlit.components.v1 as components -from IPython.display import HTML - -def animated_plot(path0, Tensor, vel, Title): - ''' - Function that creates an animated contourf plot - - Args: - path0 - path where the graph is to be saved to later be loaded on the streamlit app - Tensor - data file - vel - velocity variable: 0 for x velocity; 1 for y velocity - Title - Title 
for the graph (i.e. original data, reconstructed data...) - ''' - if Tensor.shape[-1] > Tensor.shape[0]: - Tensor = tf.transpose(Tensor, perm = [3, 1, 2, 0]) - - frames = Tensor.shape[0] - - fig, ax = plt.subplots() - - def animate(i): - ax.clear() - ax.contourf(Tensor[i, :, :, vel]) - ax.set_title(Title) - - interval = 2 - anim = animation.FuncAnimation(fig, animate, frames = frames, interval = interval*1e+2, blit = False) - - plt.show() - - with open(f"{path0}/animation.html","w") as f: - print(anim.to_html5_video(), file = f) - - HtmlFile = open(f"{path0}/animation.html", "r") - - source_code = HtmlFile.read() - - components.html(source_code, height = 700, width=900) - -def mean_absolute_percentage_error(y_true, y_pred): - epsilon = 1e-10 - y_true, y_pred = np.array(y_true), np.array(y_pred) - return np.mean(np.abs((y_true - y_pred) / np.maximum(epsilon,np.abs(y_true)))) * 100 - -def smape(A, F): - return ((100.0/len(A)) * np.sum(2 * np.abs(F - A) / (np.abs(A) + np.abs(F))+ np.finfo(float).eps)) - -def RRMSE(Tensor0, Reconst): - RRMSE = np.linalg.norm(np.reshape(Tensor0-Reconst,newshape=(np.size(Tensor0),1)),ord=2)/np.linalg.norm(np.reshape(Tensor0,newshape=(np.size(Tensor0),1))) - return(RRMSE) - -def menu(): - st.title("Autoencoders Model") - st.write(""" -An autoencoder is an unsupervised learning technique for neural networks that learns efficient data representations (encoding) by training the network to ignore signal “noise.” -""") - st.write(" ## Autoencoders DNN Model - Parameter Configuration") - - path0 = os.getcwd() - - if not os.path.exists(f'{path0}/Autoencoders_model_solution'): - os.mkdir(f"{path0}/Autoencoders_model_solution") - - # Menu parameters - - Tensor = 'Tensor_cylinder_Re100.mat' - - tensor = data_fetch.fetch_data(path0, Tensor) - - _, ny, nx, nt = tensor.shape - - # AEs parameters - type = st.selectbox('Select an autoencoder model', ('Spatial', 'Temporal')) - - - if type == 'Spatial': - type_AE=1 - elif type == 'Temporal': - type_AE=2 - - hyp_batch = st.slider("Select Batch Size", 2, 256, value = 64, step=2) - hyp_epoch = st.slider("Select Epochs", 0, 250, value = 200, step=1) - encoding_dim = st.number_input("Select autoencoder dimensions", 0, None, value = 10, step=1) - - # True: plot reconstruction vs original data - dec2 = st.radio(f"Plot autoencoder modes", ("Yes", "No")) - if dec2 == "Yes": - decision2 = True - AEs_toplot = encoding_dim - else: - decision2 = False - - dec5 = st.radio(f'Plot temporal coefficient of the modes', ('Yes', 'No')) - if dec5 == "Yes": - decision5 = True - else: - decision5 = False - - dec1 = st.radio("Original data vs Reconstruction video", ("Yes", "No")) - if dec1 == "Yes": - decision1 = True - else: - decision1 = False - - go = st.button("Calculate") - - if go: - - with st.spinner("Please wait while the model is being trained"): - RedStep=1 - tensor = tensor[:,0::RedStep,0::RedStep,0::RedStep] - ncomp, ny, nx, ntt = tensor.shape - - min_val=np.array(2) - - ## scale between [0,1] - min_val=np.zeros(ncomp,); max_val=np.zeros(ncomp,); range_val=np.zeros(ncomp,); std_val=np.zeros(ncomp,) - - tensor_norm=np.zeros(tensor.shape) - - for j in range(ncomp): - min_val [j] = np.amin(tensor[j,:,:,:]) - max_val [j] = np.amax(tensor[j,:,:,:]) - range_val [j] = np.ptp(tensor[j,:,:,:]) - std_val [j] =np.std(tensor[j,:,:,:]) - tensor_norm[j,...] = (tensor[j,...]-min_val[j])/range_val[j] - - # # ***AUTOENCODERS*** - # 3. Perform the ANN - tf.random.set_seed(221) ## Remove this to experience the randomness!!!! 
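# Descriptive note on the two autoencoder variants below, which differ only
# in how the snapshot tensor is unfolded into the training matrix X_scale:
#   Spatial AE (type_AE == 1): X_scale has shape (ny*nx*ncomp, nt); each
#     sample is one spatial point, so the latent modes are spatial patterns.
#   Temporal AE (type_AE == 2): X_scale has shape (nt, ny*nx*ncomp); each
#     sample is one snapshot, so the latent coefficients evolve in time.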
- keras.backend.clear_session() - - nxy2=ny*nx*ncomp - TT=tensor_norm.transpose((3,1,2,0)) - ntt, ny, nx, ncomp= TT.shape - - if type_AE ==1: - dim=nt - X_scale=np.reshape(TT,(dim,nxy2),order='F') - X_scale=X_scale.transpose((1,0)) - - elif type_AE ==2: - dim=nxy2 - X_scale=np.reshape(TT,(ntt,nxy2),order='F') - - input_img = Input(shape=(dim,)) - encoded = Dense(encoding_dim, activation='linear')(input_img) - decoded = Dense(dim, activation='linear')(encoded) - autoencoder = Model(input_img, decoded) - encoder = Model(input_img, encoded) - decoder = Model(encoded, decoded) - - autoencoder.compile(optimizer='adam', loss='mse') - # Get a summary - - - x_train, x_test, y_train, y_test = train_test_split(X_scale, - X_scale, - test_size=0.1) - - # CALLBACK : Early Stoping - callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, min_delta = 0.001) - - autoencoder.summary(print_fn=lambda x: st.text(x)) - - History = autoencoder.fit(x_train, x_train, - epochs=hyp_epoch, - batch_size=hyp_batch, - callbacks = [callback], - shuffle=True, - validation_data=(x_test, x_test)) - - st.write(History.history) - st.write("") - - st.success("The model has been trained!") - - # get convergence history - loss_linlin = History.history['loss'] - loss_v = History.history['val_loss'] - - # Prediction of the encoding/decoding - z = encoder.predict(X_scale) - x_tilde = autoencoder.predict(X_scale) - - #Check error - Err=np.linalg.norm(x_tilde-X_scale)/np.linalg.norm(X_scale) - - st.write(f'###### Neural Network RRMSE with all modes: {Err*100:.2f}%') - - rrmse_ = np.zeros((z.shape[1],)) - contrib = np.zeros((z.shape[1],)) - - for nm in range(0,encoding_dim): - z1=np.zeros(z.shape); - z1[:,0:nm] = z[:,0:nm]; - xx = decoder.predict(z1) - rrmse = RRMSE(X_scale,xx) - st.write(f'Adding mode number: {nm+1} - Updated RRMSE: {rrmse*100:.3f}%') - rrmse_[nm]=rrmse - - fig, ax = plt.subplots(figsize=(6, 4)) # This creates the figure - ax.plot(np.arange(0, encoding_dim)+1,rrmse_*100) - ax.scatter(np.arange(0, encoding_dim)+1,rrmse_*100) - ax.set_title("RRMSE value per added mode") - ax.set_xlabel('Mode added') - ax.set_ylabel('RRMSE') - plt.savefig(f'{path0}/Autoencoders_model_solution/RRMSE.png') - st.pyplot(fig) - - incr_rr_=np.zeros((encoding_dim,)) - for idel in range(0,encoding_dim): - array=np.arange(0,encoding_dim) - array=np.delete(array,idel) - - z1=np.zeros(z.shape) - z1[:,array]=z[:,array] - xx = decoder.predict(z1) - rrmse = RRMSE(X_scale,xx) - - incr_rr = rrmse- RRMSE(X_scale,x_tilde) - incr_rr_[idel]=incr_rr - st.write(f'Deleted mode: {idel+1} - Updated RRMSE: {rrmse*100:.3f}% - RRMSE increase (compared to all modes RRMSE): {incr_rr*100:.3f}%') - - fig, ax = plt.subplots(figsize=(6, 4)) # This creates the figure - ax.plot(np.arange(1,encoding_dim+1),incr_rr_*100) - ax.scatter(np.arange(1,encoding_dim+1),incr_rr_*100) - ax.set_title("RRMSE when mode 'n' is deleted") - ax.set_xlabel('Mode deleted') - ax.set_ylabel('RRMSE') - plt.savefig(f'{path0}/Autoencoders_model_solution/relative_RRMSE.png') - st.pyplot(fig) - - ## indexes for the sorting - - I = np.argsort(incr_rr_) - modes_sorted = np.flip(I) - - ## modes - z_sort = z[:,modes_sorted] - - lim=int(dim/ncomp) - RR=x_tilde[:,0:dim:1] - - - if type_AE ==1: - ntt= x_tilde.shape[1] - RR=x_tilde[:,0:dim:1] - RR = np.transpose(RR,(1,0)) - ZZT= np.reshape(RR,(ntt,ny,nx,ncomp),order='F') - ZZT= np.transpose(ZZT,(3,1,2,0)) - - elif type_AE ==2: - ntt= x_tilde.shape[0] - RR=x_tilde[:,0:dim:1] - ntt= x_tilde.shape[0] - 
ZZT = np.reshape(RR,(ntt,ny,nx,ncomp),order='F')
-                ZZT = np.transpose(ZZT,(3,1,2,0))
-
-            if decision1 == True:
-
-                lim = int(dim/ncomp)
-                RR = x_tilde[:,0:dim:1]
-
-                ntt = x_tilde.shape[1]
-
-                RR = np.transpose(RR,(1,0))
-                ZZT = np.reshape(RR,(ntt,ny,nx,ncomp),order='F')
-                ZZT = np.transpose(ZZT,(3,1,2,0))
-
-                np.save(f'{path0}/Autoencoders_model_solution/SM_Reconst', ZZT)
-                # CONTOUR AUTOENCODER -- CHECK RECONSTRUCTION
-
-                plot_titles = {0: 'U velocity',
-                               1: 'V Velocity'}
-
-                velocity = 0
-
-                tensor_copy = tensor.copy()
-                ZZT_copy = ZZT.copy()
-
-                animated_plot(path0, tensor_copy, vel = velocity, Title = f'Real data - {plot_titles[velocity]}')
-                animated_plot(path0, ZZT_copy, vel = velocity, Title = f'Sp. Modes reconstructed data - {plot_titles[velocity]}')
-
-                velocity = 1
-
-                animated_plot(path0, tensor_copy, vel = velocity, Title = f'Real data - {plot_titles[velocity]}')
-                animated_plot(path0, ZZT_copy, vel = velocity, Title = f'Sp. Modes reconstructed data - {plot_titles[velocity]}')
-
-            if decision2 == True:  # Plot autoencoder modes
-                if not os.path.exists(f'{path0}/Autoencoders_model_solution/Autoencoders'):
-                    os.mkdir(f"{path0}/Autoencoders_model_solution/Autoencoders")
-                st.info('Showing all autoencoder components')
-                st.info(f'All generated plots will be saved to {path0}/Autoencoders_model_solution/Autoencoders')
-
-                for AEnum in range(min(AEs_toplot, encoding_dim)):
-                    if type_AE == 1:
-                        MODE = np.transpose(z_sort[:,AEnum])
-                        AE = MODE[0:int(nx*ny*ncomp)]
-                        Rec_AE = np.reshape(AE,(ny,nx,ncomp),order='F')
-
-                    elif type_AE == 2:
-                        MODE = np.transpose(z_sort[:,AEnum])
-                        RECONST_AUTO = np.matmul(MODE, X_scale)
-                        AE = RECONST_AUTO[0:int(nx*ny*ncomp)]
-                        Rec_AE = np.reshape(AE,(ny,nx,ncomp),order='F')
-
-                    fig, ax = plt.subplots(1, Rec_AE.shape[-1], figsize = (20, 7))
-                    plt.suptitle(f'Autoencoder mode {AEnum+1}')
-                    for i in range(Rec_AE.shape[-1]):
-                        ax[i].contourf(Rec_AE[..., i])
-                        ax[i].set_title(f"Component {i+1}")
-
-                    # Index the saved file by the mode number, one figure per mode
-                    plt.savefig(f"{path0}/Autoencoders_model_solution/Autoencoders/AE_{AEnum+1}.png")
-                    st.pyplot(fig)
-
-                np.save(f'{path0}/Autoencoders_model_solution/RecAE', Rec_AE)
-
-            if decision5 == True:
-                # Plot the temporal coefficient of every retained mode
-                for AEnum in range(encoding_dim):
-                    if type_AE == 1:
-                        ss = np.transpose(z_sort[:,AEnum])
-                        coef_t = np.matmul(ss, X_scale)
-
-                    elif type_AE == 2:
-                        ss = np.transpose(z_sort[:,AEnum])
-                        coef_t = ss
-
-                    fig, ax = plt.subplots(figsize=(8, 4), num = f'CLOSE TO CONTINUE RUN - Mode {AEnum+1}')
-                    ax.plot(coef_t)
-                    ax.set_xlabel('Snapshots')
-                    ax.set_ylabel('Amplitude')
-                    ax.set_title(f'Temporal coefficient of Mode {AEnum+1}')
-                    fig.tight_layout()
-                    plt.savefig(f'{path0}/Autoencoders_model_solution/coeft_mode_{AEnum+1}.png')
-                    st.pyplot(fig)
-
-        st.success('Run complete!')
-        st.warning(f'All files have been saved to {path0}/Autoencoders_model_solution')
-        st.info("Press 'Refresh' to run a new case")
-        Refresh = st.button('Refresh')
-        if Refresh:
-            st.stop()
-
diff --git a/v0.1_ModelFLOWs_web/contour_anim.py b/v0.1_ModelFLOWs_web/contour_anim.py
deleted file mode 100644
index da70daf..0000000
--- a/v0.1_ModelFLOWs_web/contour_anim.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import matplotlib.animation as animation
-import matplotlib.pyplot as plt
-import streamlit.components.v1 as components
-from IPython.display import HTML
-
-def animated_plot(path0, Tensor, vel, Title):
-    '''
-    Function that creates an animated contourf plot
-
-    Args:
-    path0 - path where the graph is to be saved to later be loaded on the streamlit app
-    Tensor - data file
-    vel - velocity variable: 0 for x velocity; 1 for y velocity
-    
Title - Title for the graph (i.e. original data, reconstructed data...) - ''' - - frames = Tensor.shape[-1] - - fig, ax = plt.subplots() - - def animate(i): - ax.clear() - ax.contourf(Tensor[vel, :, :, i]) - ax.set_title(Title) - - interval = 2 - anim = animation.FuncAnimation(fig, animate, frames = frames, interval = interval*1e+2, blit = False) - - plt.show() - - with open(f"{path0}/animation.html","w") as f: - print(anim.to_html5_video(), file = f) - - HtmlFile = open(f"{path0}/animation.html", "r") - - source_code = HtmlFile.read() - - components.html(source_code, height = 900, width=900) diff --git a/v0.1_ModelFLOWs_web/data_fetch.py b/v0.1_ModelFLOWs_web/data_fetch.py deleted file mode 100644 index a1efdee..0000000 --- a/v0.1_ModelFLOWs_web/data_fetch.py +++ /dev/null @@ -1,18 +0,0 @@ -import hdf5storage -from sys import platform - -def fetch_data(path0, selected_file): - # Check the operating system - if platform in ["linux", "linux2"]: - # linux - Tensor_ = hdf5storage.loadmat(f'{path0}/{selected_file}') - Tensor = list(Tensor_.values())[-1] - elif platform == "darwin": - # OS X - Tensor_ = hdf5storage.loadmat(f'{path0}/{selected_file}') - Tensor = list(Tensor_.values())[-1] - elif platform in ["win32", "win64"]: - # Windows - Tensor_ = hdf5storage.loadmat(f'{path0}\{selected_file}') - Tensor = list(Tensor_.values())[-1] - return Tensor \ No newline at end of file diff --git a/v0.1_ModelFLOWs_web/gappy_page.py b/v0.1_ModelFLOWs_web/gappy_page.py deleted file mode 100644 index 8122b83..0000000 --- a/v0.1_ModelFLOWs_web/gappy_page.py +++ /dev/null @@ -1,271 +0,0 @@ -import os -import data_fetch -import numpy as np -from scipy.interpolate import griddata -from numpy import linalg as LA -import matplotlib.pyplot as plt -import streamlit as st -import hosvd - -def Gappy_SVD(A_gappy, Tensor, m, method, decision_1, output_1, output_2, path0, method_ = None): - N = sum(np.isnan(A_gappy.flatten())) - - st.write('Making initial reconstruction') - # Initial Repairing - if decision_1 == 'zeros': - A0_1 = np.nan_to_num(A_gappy, nan = 0) - elif decision_1 == 'mean': - A0_1 = np.nan_to_num(A_gappy, nan = 0) - A0_1 = np.nan_to_num(A_gappy,nan=sum(A0_1.flatten())/(A0_1.size-N)) - elif decision_1 == 'tensor_interp': - shape = A_gappy.shape - A_gappy_re = np.reshape(A_gappy, (A_gappy.shape[0], A_gappy.shape[1] * A_gappy.shape[2], A_gappy.shape[3])) - for i in range(A_gappy_re.shape[0]): - for j in range(A_gappy_re.shape[-1]): - velocity_values = A_gappy_re[i, :, j] - nan_mask = np.isnan(velocity_values) - - non_nan_indices = np.where(~nan_mask)[0] - - interpolated_values = griddata(non_nan_indices, velocity_values[~nan_mask], np.arange(A_gappy_re.shape[1]), method=method_) - - A_gappy_re[i, nan_mask, j] = interpolated_values[nan_mask] - - A0_1 = np.reshape(A_gappy_re, shape) - - st.write('Initial reconstruction complete') - - A_s = A0_1.copy() - MSE_gaps = np.zeros(500) - - st.write('Performing HOSVD. 
Please wait') - for ii in range(500): - - if method == 'svd': - [U,S,V]=LA.svd(A_s) - S = np.diag(S) - A_reconst = U[:,0:m] @ S[0:m,0:m] @ V[0:m,:] - elif method == 'hosvd': - n = m*np.ones(np.shape(A_s.shape)) - A_reconst = hosvd.HOSVD_function(A_s,n)[0] - - - MSE_gaps[ii] = LA.norm(A_reconst[np.isnan(A_gappy)]-A_s[np.isnan(A_gappy)])/N - - if ii>3 and MSE_gaps[ii]>=MSE_gaps[ii-1]: - break - else: - A_s[np.isnan(A_gappy)] = A_reconst[np.isnan(A_gappy)] - - st.write('HOSVD complete') - - if output_1 == 'yes': - sv0 = hosvd.HOSVD_function(A0_1,n)[3] - sv = hosvd.HOSVD_function(A_s,n)[3] - cmap = plt.cm.get_cmap('jet') - rgba = cmap(np.linspace(0,1,A_s.ndim)) - fig, ax = plt.subplots() - for i in range(A_s.ndim): - ax.semilogy(sv0[0,i]/sv0[0,i][0], linestyle = 'none', marker = 'x',color = rgba[i]) - ax.semilogy(sv[0,i]/sv[0,i][0], linestyle = 'none', marker = '+', color = rgba[i]) - plt.legend(['Initial Reconstruction','Final Reconstruction']) - plt.savefig(f'{path0}/Data_repair_solution_nmodes_{m}_fill_{decision_1}/svd_decay.png') - st.pyplot(fig) - - Tensor0 = Tensor[:A_s.shape[0], :A_s.shape[1], :A_s.shape[2], :A_s.shape[3]].copy() - RRMSE = np.linalg.norm(np.reshape(Tensor0 - A_s ,newshape=(np.size(Tensor0),1)),ord=2)/np.linalg.norm(np.reshape(Tensor0,newshape=(np.size(Tensor0),1))) - st.write(f'###### Error made during reconstruction: {np.round(RRMSE*100, 3)}%\n') - - if output_2 == 'yes': - for var in range(A_gappy.shape[0]): - fig, ax = plt.subplots(figsize=(20, 7), num = f'CLOSE TO CONTINUE RUN - Initial data for component {var+1}') - heatmap = ax.imshow(A_gappy[var, ..., 0], cmap='coolwarm') - ax.set_title(f'Initial gappy data - Component {var+1}', fontsize = 14) - ax.set_xlabel('X') - ax.set_ylabel('Y') - ax.axis('off') - heatmap.set_clim(np.nanmin(A_gappy[var, ..., 0]), np.nanmax(A_gappy[var, ..., 0])) - st.pyplot(fig) - - # Initial and final reconstruction vs ground truth - fig, ax = plt.subplots(1, 3, figsize = (20, 7)) - plt.suptitle('Data comparison for vel. component 1 and snapshot 1', fontsize = 16) - ax[0].contourf(A_gappy[0, ..., 0]) - ax[0].set_title('Initial data', fontsize = 14) - ax[0].axis('off') - - ax[1].contourf(A_reconst[0, ..., 0]) - ax[1].set_title('Reconstructed data', fontsize = 14) - ax[1].axis('off') - - ax[2].contourf(Tensor[0, ..., 0]) - ax[2].set_title('Ground truth', fontsize = 14) - ax[2].axis('off') - fig.tight_layout() - st.pyplot(fig) - - fig, ax = plt.subplots(1, 3, figsize = (20, 7)) - plt.suptitle('Data comparison for vel. 
component 2 and snapshot 1', fontsize = 16)
-        ax[0].contourf(A_gappy[1, ..., 0])
-        ax[0].set_title('Initial data', fontsize = 14)
-        ax[0].axis('off')
-
-        ax[1].contourf(A_reconst[1, ..., 0])
-        ax[1].set_title('Reconstructed data', fontsize = 14)
-        ax[1].axis('off')
-
-        ax[2].contourf(Tensor[1, ..., 0])
-        ax[2].set_title('Ground truth', fontsize = 14)
-        ax[2].axis('off')
-        fig.tight_layout()
-        st.pyplot(fig)
-
-    np.save(f'{path0}/Data_repair_solution_nmodes_{m}_fill_{decision_1}/Repaired_data.npy', A_reconst)
-
-
-def Gappy_augment(A_down, enhance, selected_database, path0):
-    m = [1, 2]
-
-    enhance_ = 2**enhance
-
-    A_d = A_down
-
-    st.write('Enhancing data resolution')
-
-    for ii in range(enhance):
-        if ii == 0:
-            n = A_d.shape
-
-        _ , S, U, _ , _ = hosvd.HOSVD_function(A_d,n)
-
-        Udens = U
-
-        for dim in m:
-            x = np.linspace(0, 1, U[0][dim].shape[0]*2)
-            U_dens = np.zeros((x.shape[0], S.shape[dim]))
-            for j in range(S.shape[dim]):
-                Udenscolumn = U[0][dim][:,j]
-                U_dens[:,j] = np.interp(x, x[0:x.shape[0]:2], Udenscolumn)
-            Udens[0][dim] = U_dens
-
-        A_d = hosvd.tprod(S, Udens)
-        A_d = hosvd.HOSVD_function(A_d,n)[0]
-
-    A_reconst = A_d
-
-    st.write('Data resolution enhanced')
-
-    st.write(f'Initial data resolution: {A_down.shape}')
-    st.write(f'New data resolution: {A_reconst.shape}')
-
-    st.info('Plotting first snapshot for both velocity components')
-
-    for v in range(A_down.shape[0]):
-        fig, ax = plt.subplots(1, 2, figsize = (20, 7))
-        plt.suptitle(f'Superresolution for vel. component {v+1} and snapshot 1', fontsize = 16)
-        ax[0].contourf(A_down[v, :, :, 0])
-        ax[0].set_title('Original data', fontsize = 14)
-        ax[0].xaxis.grid(True, zorder = 0)
-        ax[0].yaxis.grid(True, zorder = 0)
-
-        ax[1].contourf(A_reconst[v, :, :, 0])
-        ax[1].set_title('Enhanced data', fontsize = 14)
-        ax[1].xaxis.grid(True, zorder = 0)
-        ax[1].yaxis.grid(True, zorder = 0)
-        fig.tight_layout()
-        st.pyplot(fig)
-
-    np.save(f"{path0}/Superresolution_{selected_database}_enhancement_{enhance_}/Enhanced_data.npy", A_reconst)
-
-def menu():
-    st.title("Data Reconstruction")
-    st.write("""
-This module uses modal decomposition techniques to reconstruct datasets with missing data or enhance the resolution of a dataset.
-This is achieved by combining interpolation techniques with HOSVD.
-    """)
-
-    path0 = os.getcwd()
-
-    option = st.selectbox('Select an option', ('Repair', 'Superresolution'))
-
-    if option == 'Repair':
-
-        st.write(" ## Data Repairing")
-
-        # 1. Fetch data matrix or tensor
-        selected_database = 'Gappy_Tensor_cylinder_Re100.mat'
-        A_gappy = data_fetch.fetch_data(path0, selected_database)
-        selected_database1 = 'Tensor_cylinder_Re100.mat'
-        Tensor = data_fetch.fetch_data(path0, selected_database1)
-
-        # 2. 
Number of retained modes - m = st.number_input(f'Number of modes to retain during repair', min_value = 0, max_value = None, value = 18, step = 1) - m = int(m) - - if A_gappy.ndim > 2: - method = 'hosvd' - elif A_gappy.ndim <= 2: - method = 'svd' - - decision = ('interpolation', 'zeros', 'mean') - - decision_1 = st.selectbox('Select a decision to fill missing data', decision) - - if decision_1 == 'interpolation': - decision_1 = 'tensor_interp' - method_ = st.radio('Select an interpolation method', ('nearest', 'linear', 'cubic')) - else: - method_ = None - st.write('### Output Configuration') - - output_1 = st.radio('Visualize decay of the singular values before/after', ('yes', 'no')) - output_2 = st.radio('Visualize comparison of the surfaces', ('yes', 'no')) - - go = st.button('Calculate') - - if not os.path.exists(f'{path0}/Data_repair_solution_nmodes_{m}_fill_{decision_1}'): - os.mkdir(f"{path0}/Data_repair_solution_nmodes_{m}_fill_{decision_1}") - - if go: - with st.spinner('Please wait for the run to complete'): - - Gappy_SVD(A_gappy, Tensor, m, method, decision_1, output_1, output_2, path0, method_) - - st.success('Run complete!') - st.warning(f"All files have been saved to {path0}/Data_repair_solution_nmodes_{m}_fill_{decision_1}") - - st.info("Press 'Refresh' to run a new case") - Refresh = st.button('Refresh') - if Refresh: - st.stop() - - elif option == 'Superresolution': - st.write(" ## Data Superresolution") - selected_database = 'DS_30_Tensor_cylinder_Re100.mat' - A_down = data_fetch.fetch_data(path0, selected_database) - - # 2. Number of retained modes - augment = st.number_input(f'Select the enhancement scale (2^factor)', min_value = 0, max_value = None, value = 4, step = 1) - augment = int(augment) - - augment_ = 2**augment - - go = st.button('Calculate') - - selected_database = selected_database.replace(".mat", "") - - if not os.path.exists(f'{path0}/Superresolution_{selected_database}_enhancement_{augment_}'): - os.mkdir(f"{path0}/Superresolution_{selected_database}_enhancement_{augment_}") - - if go: - with st.spinner('Please wait for the run to complete'): - - Gappy_augment(A_down, augment, selected_database, path0) - - st.success('Run complete!') - st.warning(f"All files have been saved to {path0}/Superresolution_{selected_database}_enhancement_{augment_}") - - st.info("Press 'Refresh' to run a new case") - Refresh = st.button('Refresh') - if Refresh: - st.stop() \ No newline at end of file diff --git a/v0.1_ModelFLOWs_web/hodmd_page.py b/v0.1_ModelFLOWs_web/hodmd_page.py deleted file mode 100644 index b10d6b0..0000000 --- a/v0.1_ModelFLOWs_web/hodmd_page.py +++ /dev/null @@ -1,481 +0,0 @@ -# Import functions -import data_fetch -import numpy as np -import DMDd -import os -import sys -import matplotlib.pyplot as plt -import streamlit as st -import pandas as pd -import contour_anim -import hosvd - -def HODMD_func(Tensor, deltaT, varepsilon1, varepsilon2, d, SNAP, path0, decision, decision1): - # Create new folder: - if not os.path.exists(f'{path0}/HODMD_solution'): - os.mkdir(f'{path0}/HODMD_solution') - if not os.path.exists(f'{path0}/HODMD_solution/d_{d}_tol_{varepsilon1}'): - os.mkdir(f'{path0}/HODMD_solution/d_{d}_tol_{varepsilon1}') - if not os.path.exists(f'{path0}/HODMD_solution/d_{d}_tol_{varepsilon1}/DMDmodes'): - os.mkdir(f'{path0}/HODMD_solution/d_{d}_tol_{varepsilon1}/DMDmodes') - - Time = np.linspace(0,SNAP-1,num=SNAP)*deltaT - Tensor = Tensor[..., :SNAP] - Tensor0 = Tensor.copy() - dims = Tensor.ndim - shape = Tensor.shape - - dims_prod = np.prod(shape[:-1]) 
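# Descriptive note on the flattening below: the snapshot tensor is reshaped
# into a matrix with one column per time instant, the layout the DMD
# routines in DMDd expect. Illustrative shapes only (not from the source):
# a (2, 100, 50, 150) tensor gives dims_prod = 10000 and a snapshot matrix
# of shape (10000, 150).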
- Tensor = np.reshape(Tensor, (dims_prod, shape[-1])) - - notone=0 - for i in range(0,np.size(np.shape(Tensor))): - if np.shape(Tensor)[i] != 1: - notone=notone+1 - - if notone<=2: - if d==1: - st.write('Performing DMD') - st.write("") - [u,Amplitude,Eigval,GrowthRate,Frequency,DMDmode] = DMDd.dmd1(Tensor, Time, varepsilon1, varepsilon2) - st.write("") - st.write('DMD complete') - dt=Time[1]-Time[0] - icomp=complex(0,1) - mu=np.zeros(np.size(GrowthRate),dtype=np.complex128) - for iii in range(0,np.size(GrowthRate)): - mu[iii] = np.exp(np.dot(dt,GrowthRate[iii]+np.dot(icomp,Frequency[iii]))) - Reconst=DMDd.remake(u,Time,mu) - else: - st.write('Performing HODMD') - st.write("") - [u,Amplitude,Eigval,GrowthRate,Frequency,DMDmode] = DMDd.hodmd(Tensor, d, Time, varepsilon1, varepsilon2) - st.write("") - st.write('HODMD complete') - dt=Time[1]-Time[0] - icomp=complex(0,1) - mu=np.zeros(np.size(GrowthRate),dtype=np.complex128) - for iii in range(0,np.size(GrowthRate)): - mu[iii] = np.exp(np.dot(dt,GrowthRate[iii]+np.dot(icomp,Frequency[iii]))) - Reconst=DMDd.remake(u,Time,mu) - - - newshape = [] - newshape.append(shape[:-1]) - newshape.append(DMDmode.shape[-1]) - newshape = list(newshape[0]) + [newshape[1]] - DMDmode = np.reshape(DMDmode, np.array(newshape)) - Reconst = np.reshape(Reconst, shape) - RRMSE = np.linalg.norm(np.reshape(Tensor0-Reconst,newshape=(np.size(Tensor0),1)),ord=2)/np.linalg.norm(np.reshape(Tensor0,newshape=(np.size(Tensor0),1))) - st.write(f'\n###### Relative mean square error made in the calculations: {np.round(RRMSE*100, 3)}%') - - - GRFreqAmp = np.zeros((np.size(GrowthRate),3)) - GRFreqAmp[:,0] = GrowthRate[:] - GRFreqAmp[:,1] = Frequency[:] - GRFreqAmp[:,2] = Amplitude[:] - - - st.write("") - st.table(pd.DataFrame(GRFreqAmp, columns=['GrowthRate', 'Frequency', 'Amplitude'])) - st.write("") - - # Reconstruction: - np.save(f'{path0}/HODMD_solution/d_{d}_tol_{varepsilon1}/TensorReconst',Reconst) - - # GrowthRate, Frequency and Amplitude: - np.save(f'{path0}/HODMD_solution/d_{d}_tol_{varepsilon1}/GrowthRateFrequencyAmplitude',GRFreqAmp) - - ## Calculate DMD modes: - np.save(f'{path0}/HODMD_solution/d_{d}_tol_{varepsilon1}/DMDmode',DMDmode) - - fig, ax = plt.subplots() - ax.plot(Frequency,GrowthRate, 'k+') - plt.yscale('log') - ax.set_xlabel('Frequency ($\omega_{n}$)') - ax.set_ylabel('GrowthRate ($\delta_{n}$)') - plt.savefig(f'{path0}/HODMD_solution/d_{d}_tol_{varepsilon1}/FrequencyGrowthRate.png') - st.pyplot(fig) - - fig, ax = plt.subplots() - ax.plot(Frequency, Amplitude/np.amax(Amplitude),'r+') - ax.set_yscale('log') # Logarithmic scale in Tensor0.shape[1]/3 axis - ax.set_xlabel('Frequency ($\omega_{n}$)') - ax.set_ylabel('Amplitude divided by max. 
amplitude ($a_{n}$)') - plt.savefig(f'{path0}/HODMD_solution/d_{d}_tol_{varepsilon1}/FrequencyAmplitude.png') - st.pyplot(fig) - - st.info(f'Saving DMDmodes plots to {path0}/HODMD_solution/d_{d}_tol_{varepsilon1}/DMDmodes\n') - for ModComp in range(DMDmode.shape[0]): - for ModeNum in range(DMDmode.shape[-1]): - fig, ax = plt.subplots(1, 2, figsize = (20, 7)) - fig.suptitle(f'DMDmode - Component {ModComp+1} Mode Number {ModeNum+1}', fontsize = 16) - ax[0].contourf(DMDmode[ModComp,:,:,ModeNum].real) - ax[0].set_title('Real part', fontsize = 14) - ax[0].set_xlabel('X', fontsize = 10) - ax[0].set_ylabel('Y', fontsize = 10) - - ax[1].contourf(DMDmode[ModComp,:,:,ModeNum].imag) - ax[1].set_title('Imaginary part', fontsize = 14) - ax[1].set_xlabel('X', fontsize = 10) - ax[1].set_ylabel('Y', fontsize = 10) - fig.tight_layout() - plt.savefig(f'{path0}/HODMD_solution/d_{d}_tol_{varepsilon1}/DMDmodes/DMDmodeComp_{ModComp+1}_ModeNum_{ModeNum+1}.png') - plt.close(fig) - - st.info('Plotting first 3 DMD modes for all velocity components') - for ModComp in range(DMDmode.shape[0]): - for ModeNum in range(3): - fig, ax = plt.subplots(1, 2, figsize = (20, 7)) - fig.suptitle(f'DMDmode - Component {ModComp+1} Mode Number {ModeNum+1}', fontsize = 16) - ax[0].contourf(DMDmode[ModComp,:,:,ModeNum].real) - ax[0].set_title('Real part', fontsize = 14) - ax[0].axis('off') - - ax[1].contourf(DMDmode[ModComp,:,:,ModeNum].imag) - ax[1].set_title('Imaginary part', fontsize = 14) - ax[1].axis('off') - fig.tight_layout() - st.pyplot(fig) - - st.info('Plotting Real data vs Reconstruction') - for i in range(Tensor0.shape[0]): - fig, ax = plt.subplots(1, 2, figsize = (20, 7)) - fig.suptitle(f'Real Data vs Reconstruction - Vel. comparison - Component {i+1}') - ax[0].contourf(Tensor0[i, :, :, 0]) - ax[0].scatter(Tensor0.shape[2]/4, Tensor0.shape[1]/3, c ='black', s=50) - ax[1].plot(Time[:], Reconst[i, int(Tensor0.shape[1]/3), int(Tensor0.shape[2]/4), :], 'k-x', label = 'Reconstructed Data') - ax[1].plot(Time[:], Tensor0[i, int(Tensor0.shape[1]/3), int(Tensor0.shape[2]/4), :], 'r-+', label = 'Real Data') - ax[1].set_xlabel('Time') - ax[1].set_ylabel('Data') - ax[1].legend(bbox_to_anchor=(1.04, 0.5), loc="center left", borderaxespad=0) - fig.tight_layout() - plt.savefig(f'{path0}/HODMD_solution/d_{d}_tol_{varepsilon1}/OrigReconst_{i}.png') - st.pyplot(fig) - - - if decision == 'Yes': - st.info('Comparison of Real Data vs Reconstruction') - if decision1 != 'V': - contour_anim.animated_plot(path0, Tensor0, vel=0, Title = 'Real Data U velocity') - contour_anim.animated_plot(path0, Reconst, vel=0, Title = 'Reconstruction U velocity') - if decision1 != 'U': - contour_anim.animated_plot(path0, Tensor0, vel=1, Title = 'Real Data V velocity') - contour_anim.animated_plot(path0, Reconst, vel=1, Title = 'Reconstruction V velocity') - -def HODMD_IT(Tensor, SNAP, varepsilon1, varepsilon2, deltaT, TimePos, d, path0, decision, decision1, type_, iters): - # Create new folder: - if not os.path.exists(f'{path0}/{type_}_HODMD_solution'): - os.mkdir(f"{path0}/{type_}_HODMD_solution") - if not os.path.exists(f'{path0}/{type_}_HODMD_solution/d_{d}_tol_{varepsilon1}'): - os.mkdir(f"{path0}/{type_}_HODMD_solution/d_{d}_tol_{varepsilon1}") - - ## Tensor dimension - number of snapshots: - - Tensor = Tensor[..., 0:SNAP] - - TensorR = Tensor.copy() - - Time = np.linspace(0, SNAP-1, num = SNAP) * deltaT - - path0 = os.getcwd() - - dims = Tensor.ndim - - ## ALGORITHM: - - ## ITERATIVE: - nn0 = np.shape(Tensor) - nn = np.array(nn0) - nn[1:np.size(nn)] = 0 - 
st.write("") - for zz in range(0,iters): - if iters > 1: - st.write(f'##### Iteration number: {zz+1}') - - if zz != 0: - del S,U,Frequency,GrowthRate,hatT,hatMode,sv,TensorR,nnin - TensorR = TensorReconst - - ## Perform HOSVD decomposition to calculate the reduced temporal matrix: hatT - nnin = nn - nnin = tuple(nnin) - st.write('Performing HOSVD') - [hatT,U,S,sv,nn1,n,TT] = hosvd.HOSVD(TensorR,varepsilon1,nn,nn0,TimePos) - st.write('HOSVD complete') - st.write("") - - ## Perform HODMD to the reduced temporal matrix hatT: - st.write('Performing HODMD') - st.write("") - [hatMode,Amplitude,Eigval,GrowthRate,Frequency] = DMDd.hodmd_IT(hatT,d,Time,varepsilon1,varepsilon2) - st.write("") - st.write('HODMD complete') - - ## Reconstruct the original Tensor using the DMD expansion: - TensorReconst = DMDd.reconst_IT(hatMode,Time,U,S,sv,nn1,TimePos,GrowthRate,Frequency) - - ## Print outcome: - - RRMSE = np.linalg.norm(np.reshape(Tensor-TensorReconst,newshape=(np.size(Tensor),1)),ord=2)/np.linalg.norm(np.reshape(Tensor,newshape=(np.size(Tensor),1))) - st.write(f'\n###### Relative mean square error made in the calculations: {np.round(RRMSE*100, 3)}%') - st.write(""" - - """) - - GRFreqAmp = np.zeros((np.size(GrowthRate),3)) - GRFreqAmp[:,0] = GrowthRate[:] - GRFreqAmp[:,1] = Frequency[:] - GRFreqAmp[:,2] = Amplitude[:] - - ## Break the loop when the number of singular values is the same in two consecutive iterations: - - num = 0 - for ii in range(1,np.size(nn1)): - if nnin[ii]==nn1[ii]: - num = num+1 - - if num==np.size(nn1)-1: - break - nn = nn1 - print('\n') - - ## Save the reconstruction of the tensor and the Growth rates, frequencies and amplitudes: - - # Reconstruction: - np.save(f'{path0}/{type_}_HODMD_solution/d_{d}_tol_{varepsilon1}/TensorReconst',TensorReconst) - - - st.table(pd.DataFrame(GRFreqAmp, columns=['GrowthRate', 'Frequency', 'Amplitude'])) - st.write("") - - # GrowthRate, Frequency and Amplitude: - np.save(f'{path0}/{type_}_HODMD_solution/d_{d}_tol_{varepsilon1}/GrowthRateFrequencyAmplitude',GRFreqAmp) - - - ## Calculate DMD modes: - N = np.shape(hatT)[0] - DMDmode = DMDd.modes_IT(N,hatMode,Amplitude,U,S,nn1,TimePos) - np.save(f'{path0}/{type_}_HODMD_solution/d_{d}_tol_{varepsilon1}/DMDmode',DMDmode) - - # Frequency vs absolute value of GrowthRate: - - fig1, ax1 = plt.subplots() - ax1.plot(Frequency,np.abs(GrowthRate), 'k+') - ax1.set_yscale('log') # Logarithmic scale in y axis - ax1.set_xlabel('Frequency ($\omega_{m}$)') - ax1.set_title('Frequency vs. GrowthRate') - ax1.set_ylabel('Absolute value of GrowthRate (|$\delta_{m}$|)') - plt.savefig(f'{path0}/{type_}_HODMD_solution/d_{d}_tol_{varepsilon1}/FrequencyGrowthRate.png') - - st.pyplot(fig1) - - # Frequency vs amplitudes: - fig2, ax2 = plt.subplots() - ax2.plot(Frequency, Amplitude,'r+') - ax2.set_yscale('log') # Logarithmic scale in y axis - ax2.set_xlabel('Frequency ($\omega_{m}$)') - ax2.set_ylabel('Amplitude ($a_{m}$)') - ax2.set_title('Frequency vs. 
Amplitude') - plt.savefig(f'{path0}/{type_}_HODMD_solution/d_{d}_tol_{varepsilon1}/FrequencyAmplitude.png') - - st.pyplot(fig2) - - if not os.path.exists(f'{path0}/{type_}_HODMD_solution/d_{d}_tol_{varepsilon1}/DMDmodes'): - os.mkdir(f"{path0}/{type_}_HODMD_solution/d_{d}_tol_{varepsilon1}/DMDmodes") - - st.info(f'Saving DMD mode plots to {path0}/{type_}_HODMD_solution/d_{d}_tol_{varepsilon1}/DMDmodes\n') - for ModComp in range(DMDmode.shape[0]): - for ModeNum in range(DMDmode.shape[-1]): - fig, ax = plt.subplots(1, 2, figsize = (20, 7)) - fig.suptitle(f'DMDmode - Component {ModComp+1} Mode Number {ModeNum+1}') - ax[0].contourf(DMDmode[ModComp,:,:,ModeNum].real) - ax[0].set_title('Real part') - ax[0].set_xlabel('X') - ax[0].set_ylabel('Y') - - ax[1].contourf(DMDmode[ModComp,:,:,ModeNum].imag) - ax[1].set_title('Imaginary part') - ax[1].set_xlabel('X') - ax[1].set_ylabel('Y') - fig.tight_layout() - plt.savefig(f'{path0}/{type_}_HODMD_solution/d_{d}_tol_{varepsilon1}/DMDmodes/DMDmodeComp_{ModComp+1}_ModeNum_{ModeNum+1}.png') - plt.close(fig) - - st.info('Plotting first 3 DMD modes for all velocity components') - for ModComp in range(DMDmode.shape[0]): - for ModeNum in range(3): - fig, ax = plt.subplots(1, 2, figsize = (20, 7)) - fig.suptitle(f'DMDmode - Component {ModComp+1} Mode Number {ModeNum+1}', fontsize = 16) - ax[0].contourf(DMDmode[ModComp,:,:,ModeNum].real) - ax[0].set_title('Real part', fontsize = 14) - ax[0].axis('off') - - ax[1].contourf(DMDmode[ModComp,:,:,ModeNum].imag) - ax[1].set_title('Imaginary part', fontsize = 14) - ax[1].axis('off') - fig.tight_layout() - st.pyplot(fig) - - st.info('Plotting Real data vs Reconstruction') - for i in range(Tensor.shape[0]): - fig, ax = plt.subplots(1, 2, figsize = (20, 7)) - fig.suptitle(f'Real Data vs Reconstruction - Vel. comparison - Component {i+1}') - ax[0].contourf(Tensor[i, :, :, 0]) - ax[0].scatter(Tensor.shape[2]/4, Tensor.shape[1]/3, c ='black', s=50) - ax[1].plot(Time[:], TensorReconst[i, int(Tensor.shape[1]/3), int(Tensor.shape[2]/4), :], 'k-x', label = 'Reconstructed Data') - ax[1].plot(Time[:], Tensor[i, int(Tensor.shape[1]/3), int(Tensor.shape[2]/4), :], 'r-+', label = 'Real Data') - ax[1].set_xlabel('Time') - ax[1].set_ylabel('Data') - ax[1].legend(bbox_to_anchor=(1.04, 0.5), loc="center left", borderaxespad=0) - fig.tight_layout() - plt.savefig(f'{path0}/{type_}_HODMD_solution/d_{d}_tol_{varepsilon1}/OrigReconst_{i}.png') - st.pyplot(fig) - - if decision == 'Yes': - st.info('Comparison of Real Data vs Reconstruction') - if decision1 != 'V': - contour_anim.animated_plot(path0, Tensor, vel=0, Title = 'Real Data U velocity') - contour_anim.animated_plot(path0, TensorReconst, vel=0, Title = 'Reconstruction U velocity') - if decision1 != 'U': - contour_anim.animated_plot(path0, Tensor, vel=1, Title = 'Real Data V velocity') - contour_anim.animated_plot(path0, TensorReconst, vel=1, Title = 'Reconstruction V velocity') - - -def menu(): - st.title("Higher Order Dynamic Mode Decomposition, HODMD") - st.write(''' -HODMD is a data-driven method widely used in fluid dynamics and in the analysis of complex non-linear dynamical systems arising in many industrial applications. -There are two available options: HODMD and Multi-dimensional HODMD.
The difference between these is that the first algorithm applies SVD on the input tensor and, therefore, -is suitable for matrices, while Multi-dimensional HODMD applies HOSVD on the input data, making it ideal for tensors. -''') - - path0 = os.getcwd() - - hodmd_type = st.selectbox("Which type of HODMD would you like to perform", ("HODMD", "Multi-dimensional HODMD")) - -### CLASSIC HODMD ## - if hodmd_type == "HODMD": - st.write(" ## HODMD - Parameter Configuration") - - # 1. Fetch data matrix or tensor - selected_data = 'Tensor_cylinder_Re100.mat' - Tensor = data_fetch.fetch_data(path0, selected_data) - - varepsilon1 = st.number_input(f'Introduce SVD tolerance value', min_value = 0.0, max_value = 0.5, value = float(1e-10), step = 0.001, format="%.10f") - varepsilon1 = float(varepsilon1) - - # 2. Select tolerance. DMDd tolerance = SVD tolerance - varepsilon2 = st.number_input(f'Introduce HODMD tolerance value', min_value = 0.0, max_value = 0.5, value = float(1e-3), step = 0.001, format="%.10f") - varepsilon2 = float(varepsilon2) - - max_snaps = Tensor.shape[-1] - - # 4. Select number of snapshots - SNAP = st.number_input(f'Introduce number of snapshots (must be lower than {max_snaps})', max_value = max_snaps, value = max_snaps, step = 1) - SNAP = int(SNAP) - - # 3. d parameter - d = st.number_input('Introduce number of HODMD windows (d)', min_value = 1, max_value = None, step = 1, value = int(np.round(SNAP/10))) - st.info(f'Interval of recommended number of HODMD windows (d): [{int(np.round(SNAP/10))}, {int(np.round(SNAP/2))}]. Other values are accepted') - d = int(d) - - # 5. Time gradient (time step can be equidistant or not) - deltaT = st.number_input('Introduce time gradient (deltaT)', min_value = 0.01, max_value = None, step = 0.01, value = 1., format = "%.2f") - deltaT = float(deltaT) - - dims = Tensor.ndim - - if dims > 2: - decision = st.radio('Represent real data and reconstruction videos', ('Yes', 'No')) - if decision == 'Yes': - decision1 = st.radio('For U, V or both velocities', ('U', 'V', 'Both')) - else: - decision1 = None - - go = st.button('Calculate') - - if go: - with st.spinner('Please wait until the run is complete'): - - HODMD_func(Tensor, deltaT, varepsilon1, varepsilon2, d, SNAP, path0, decision, decision1) - - st.success("Run complete!") - - st.warning(f"All files have been saved to HODMD_solution/d_{d}_tol_{varepsilon1}") - - st.info("Press 'Refresh' to run a new case") - Refresh = st.button('Refresh') - if Refresh: - st.stop() - - -### ITERATIVE HODMD ### - if hodmd_type == "Multi-dimensional HODMD": - st.write('The Multi-dimensional HODMD has two variants: Non-iterative and iterative.') - type = st.selectbox('Select a version', ('Non-iterative', 'Iterative')) - if type == 'Iterative': - st.write(" ## Iterative Multi-dimensional HODMD - Parameter Configuration") - iters = 1000 - type_ = 'it' - if type == 'Non-iterative': - st.write(" ## Non-iterative Multi-dimensional HODMD - Parameter Configuration") - iters = 1 - type_ = 'non-it' - - # 1. Select data matrix/tensor - selected_file = 'Tensor_cylinder_Re100.mat' - Tensor = data_fetch.fetch_data(path0, selected_file) - - varepsilon1 = st.number_input(f'Introduce SVD tolerance value', min_value = 0.0, max_value = 0.5, value = float(1e-10), step = 0.001, format="%.10f") - varepsilon1 = float(varepsilon1) - - # 2. Tolerances. 
DMDd tolerance = SVD tolerance - varepsilon2 = st.number_input(f'Introduce HODMD tolerance value', min_value = 0.0, max_value = 0.5, value = float(1e-3), step = 0.001, format="%.10f") - varepsilon2 = float(varepsilon2) - - max_snaps = Tensor.shape[-1] - - # 4. Number of snapshots - SNAP = st.number_input(f'Introduce number of snapshots (must be lower than {max_snaps})', max_value = max_snaps, value = max_snaps, step = 1) - - SNAP = int(SNAP) - - # 3. d parameter - d = st.number_input('Introduce number of HODMD windows (d)', min_value = 1, max_value = None, step = 1, value = int(np.round(SNAP/10))) - st.info(f'Interval of recommended number of HODMD windows (d): [{int(np.round(SNAP/10))}, {int(np.round(SNAP/2))}]. Other values are accepted') - d = int(d) - - # 5. Time gradient (time step can be equidistant or not) - deltaT = st.number_input('Introduce time gradient (deltaT)', min_value = 0.01, max_value = None, step = 0.01, value = 1., format = "%.2f") - deltaT = float(deltaT) - - # 6. Position of temporal variable (only if data is in tensor form) - - TimePos = int(Tensor.ndim) - - decision = st.radio('Represent real data and reconstruction videos', ('Yes', 'No')) - if decision == 'Yes': - decision1 = st.radio('For U, V or both velocities', ('U', 'V', 'Both')) - else: - decision1 = None - - go = st.button('Calculate') - - if go: - with st.spinner('Please wait until the run is complete'): - - HODMD_IT(Tensor, SNAP, varepsilon1, varepsilon2, deltaT, TimePos, d, path0, decision, decision1, type_, iters) - - st.success('Run complete!') - - st.warning(f"All files have been saved to {path0}/{type_}_HODMD_solution/d_{d}_tol_{varepsilon1}") - - st.info("Press 'Refresh' to run a new case") - Refresh = st.button('Refresh') - if Refresh: - st.stop() diff --git a/v0.1_ModelFLOWs_web/hodmd_pred_page.py b/v0.1_ModelFLOWs_web/hodmd_pred_page.py deleted file mode 100644 index 6e1972b..0000000 --- a/v0.1_ModelFLOWs_web/hodmd_pred_page.py +++ /dev/null @@ -1,561 +0,0 @@ -import DMDd -import streamlit as st -import os -import numpy as np -import matplotlib.pyplot as plt -import scipy.io # To read data from .mat files. -from math import floor -import data_fetch -import hosvd -import contour_anim -import pandas as pd - -def menu(): - st.title("Prediction using Higher Order Dynamic Mode Decomposition, HODMD") - st.write(''' -HODMD is a data-driven method generally used in fluid dynamics and in the analysis of complex non-linear dynamical systems modeling several complex industrial applications. -There are two available option: HODMD and Multi-dimensional HODMD. The difference between these is that the first algorithm applies SVD on the input tensor and, therefore, -is suitable for matrices, while Multi-dimensional HODMD applies HOSVD on the input data, making it ideal for tensors. All of these variants are used to make predictions in time. -''') - - path0 = os.getcwd() - - hodmd_type = st.selectbox("Which type of HODMD would you like to perform", ("Predictive HODMD", "Multi-dimensional predictive HODMD")) - - -### CLASSIC HODMD ## - if hodmd_type == "Predictive HODMD": - st.write(" ## Predictive HODMD - Parameter Configuration") - - # 1. Fetch data matrix or tensor - selected_data = 'Tensor_cylinder_Re100.mat' - Tensor = data_fetch.fetch_data(path0, selected_data) - - varepsilon1 = st.number_input(f'Introduce SVD tolerance value', min_value = 0.0, max_value = 0.5, value = float(1e-10), step = 0.001, format="%.10f") - varepsilon1 = float(varepsilon1) - - # 2. Select tolerance. 
DMDd tolerance - varepsilon = st.number_input(f'Introduce HODMD tolerance value', min_value = 0.0, max_value = 0.5, value = float(1e-3), step = 0.001, format="%.10f") - varepsilon = float(varepsilon) - - # 3. Select number of snapshots - max_snaps = Tensor.shape[-1] - SNAP = st.number_input(f'Introduce number of snapshots (must not exceed {max_snaps})', max_value = max_snaps, value = max_snaps, step = 1) - SNAP = int(SNAP) - - # 4. d parameter - d = st.number_input('Introduce number of HODMD windows (d)', min_value = 1, max_value = None, step = 1, value = int(np.round(SNAP/10))) - st.info(f'Interval of recommended number of HODMD windows (d): [{int(np.round(SNAP/10))}, {int(np.round(SNAP/2))}]. Other values are accepted') - d = int(d) - - # 5. Time gradient (time step can be equidistant or not) - deltaT = st.number_input('Introduce time gradient (deltaT)', min_value = 0.01, max_value = None, step = 0.01, value = 1., format = "%.2f") - deltaT = float(deltaT) - - - Time = np.linspace(0,SNAP-1,num=SNAP)*deltaT - - # Prediction time - st.write(f'The temporal interval of the data analysed is [{int(Time[0])},{int(Time[-1])}]') - TimeFin = st.number_input('Introduce final time to predict your solution', min_value = int(Time[-1]), max_value = None, value = int(Time[-1])*2, step = 1) - TimeFin = int(TimeFin) - SnapDif=floor(abs(TimeFin-Time[-1])*deltaT) - TimePred = np.linspace(0,SNAP+SnapDif-1,num=SNAP+SnapDif)*deltaT - - - if not os.path.exists(f'{path0}/HODMD_pred_solution'): - os.mkdir(f'{path0}/HODMD_pred_solution') - if not os.path.exists(f'{path0}/HODMD_pred_solution/d_{d}_tol_{varepsilon1}'): - os.mkdir(f'{path0}/HODMD_pred_solution/d_{d}_tol_{varepsilon1}') - if not os.path.exists(f'{path0}/HODMD_pred_solution/d_{d}_tol_{varepsilon1}/DMDmodes'): - os.mkdir(f'{path0}/HODMD_pred_solution/d_{d}_tol_{varepsilon1}/DMDmodes') - - Tensor = Tensor[..., :SNAP] - Tensor0 = Tensor.copy() - dims = Tensor.ndim - shape = Tensor.shape - - if dims > 2: - dims_prod = np.prod(shape[:-1]) - Tensor = np.reshape(Tensor, (dims_prod, shape[-1])) - - # Set tolerance for growth rates - TolGR=0 - TolGRset = st.radio('Do you want to retain the modes with growth rate (in absolute value) < tolerance?', ('Yes', 'No')) - if TolGRset == 'Yes': - TolGR=1 - TolGRvalue = st.number_input('Introduce the tolerance for the growth rate of the modes retained', min_value = 0., max_value = None, value = 0.5, step = 0.001, format = '%.3f') - if not TolGRvalue: - TolGRvalue = 0.5 - else: - TolGRvalue = float(TolGRvalue) - TolGRset2 = st.radio('Do you want to set the growth rate to 0 for the temporal predictions', ('No', 'Yes')) - if TolGRset2 == 'Yes': - GRset=1 - else: - GRset=0 - - if dims > 2: - decision = st.radio('Represent real data and reconstruction videos', ('Yes', 'No')) - if decision == 'Yes': - decision1 = st.radio('For U, V or both velocities', ('U', 'V', 'Both')) - else: - decision1 = None - - go = st.button('Calculate') - if go: - with st.spinner('Please wait for the run to complete'): - - notone=0 - for i in range(0,np.size(np.shape(Tensor))): - if np.shape(Tensor)[i] != 1: - notone=notone+1 - - if notone<=2: - if d==1: - st.write('Performing DMD') - st.write("") - [u,Amplitude,Eigval,GrowthRate,Frequency,DMDmode] = DMDd.dmd1(Tensor, Time, varepsilon1, varepsilon) - st.write("") - st.write('DMD complete') - dt=Time[1]-Time[0] - icomp=complex(0,1) - if 
TolGR==1: - ind = np.where(np.abs(GrowthRate) 1: - st.write(f'##### Iteration number: {zz+1}') - - if zz != 0: - del S,U,Frequency,GrowthRate,hatT,hatMode,sv,TensorR,nnin - TensorR = TensorReconst - # Reconstruction - ## Perform HOSVD decomposition to calculate the reduced temporal matrix: hatT - nnin = nn - nnin = tuple(nnin) - st.write('Performing HOSVD') - [hatT,U,S,sv,nn1,n, _] = hosvd.HOSVD(TensorR,varepsilon1,nn,nn0,TimePos) - st.write('HOSVD complete') - st.write("") - ## Perform HODMD to the reduced temporal matrix hatT: - - st.write('Performing HODMD') - st.write("") - [hatMode,Amplitude,Eigval,GrowthRate,Frequency] = DMDd.hodmd_IT(hatT,d,Time,varepsilon1,varepsilon2) - st.write("") - st.write('HODMD complete') - - ## Reconstruct the original Tensor using the DMD expansion: - TensorReconst = DMDd.reconst_IT(hatMode,Time,U,S,sv,nn1,TimePos,GrowthRate,Frequency) - - ## Print outcome: - - RRMSE = np.linalg.norm(np.reshape(Tensor - TensorReconst,newshape=(np.size(Tensor),1)),ord=2)/np.linalg.norm(np.reshape(Tensor,newshape=(np.size(Tensor),1))) - st.write(f'\n###### Relative mean square error made in the calculations: {np.round(RRMSE*100, 3)}%') - st.write(""" - - """) - - - GRFreqAmp = np.zeros((np.size(GrowthRate),3)) - for ind in range (0,np.size(GrowthRate)): - GRFreqAmp[ind,0] = GrowthRate[ind] - GRFreqAmp[ind,1] = Frequency[ind] - GRFreqAmp[ind,2] = Amplitude[ind] - - - ## Break the loop when the number of singular values is the same in two consecutive iterations: - - num = 0 - for ii in range(1,np.size(nn1)): - if nnin[ii]==nn1[ii]: - num = num+1 - - if num==np.size(nn1)-1: - break - nn = nn1 - - ######### PREDICTIONS - - - if TolGR==1: - ind = np.where(np.abs(GrowthRate) abcl', S, U[0], U[1], U[2]) - - st.info(f'All SVD mode plots will be saved to {path0}/HOSVD_solution/tol_{varepsilon1}/SVD_modes') - for ModComp in range(SVD_modes.shape[0]): - for ModeNum in range(SVD_modes.shape[-1]): - fig, ax = plt.subplots() - ax.contourf(SVD_modes[ModComp,:,:,ModeNum]) - ax.set_title(f'Component {ModComp+1} - SVD mode {ModeNum+1}', fontsize = 14) - ax.set_xlabel('X') - ax.set_ylabel('Y') - fig.tight_layout() - plt.savefig(f'{path0}/HOSVD_solution/tol_{varepsilon1}/SVD_modes/Comp_{ModComp+1}_SVDmodes_{ModeNum+1}.png') - plt.close(fig) - - st.info(f'Showing first 3 SVD modes for {SVD_modes.shape[0]} components') - - for ModComp in range(SVD_modes.shape[0]): - for ModeNum in range(0, 3): - fig, ax = plt.subplots() - ax.contourf(SVD_modes[ModComp,:,:,ModeNum]) - ax.set_title(f'Component {ModComp+1} - SVD mode {ModeNum+1}', fontsize = 14) - ax.set_xlabel('X') - ax.set_ylabel('Y') - ax.axis('off') - fig.tight_layout() - st.pyplot(fig) - - np.save(f'{path0}/HOSVD_solution/tol_{varepsilon1}/TensorReconst', TT) - np.save(f'{path0}/HOSVD_solution/tol_{varepsilon1}/SVD_modes', SVD_modes) - - if decision == 'Yes': - st.write('Comparison of Real Data vs Reconstruction') - if decision1 != 'V': - contour_anim.animated_plot(path0, Tensor, vel=0, Title = 'Real Data U velocity') - contour_anim.animated_plot(path0, TT, vel=0, Title = 'Reconstruction U velocity') - if decision1 != 'U': - contour_anim.animated_plot(path0, Tensor, vel=1, Title = 'Real Data V velocity') - contour_anim.animated_plot(path0, TT, vel=1, Title = 'Reconstruction V velocity') - - -def menu(): - st.title("Higher-Order Singular Value Decomposition, HOSVD") - st.write("""# -In multilinear algebra, the Higher-Order Singular Value Decomposition (HOSVD) of a tensor is a specific orthogonal Tucker decomposition. 
-It may be regarded as one generalization of the matrix singular value decomposition. -It has applications in computer vision, computer graphics, machine learning, scientific computing, and signal processing. - """) - st.write(" ## HOSVD - Parameter Configuration") - - path0 = os.getcwd() - - # 1. Data selection - selected_file = 'Tensor_cylinder_Re100.mat' - Tensor = data_fetch.fetch_data(path0, selected_file) - - # 2. Tolerances. DMDd tolerance = SVD tolerance - varepsilon1 = st.number_input(f'Introduce SVD tolerance', min_value = 0.000, max_value = 0.5, value = 0.0001, step = 0.0001, format="%.10f") - varepsilon1 = float(varepsilon1) - - SNAP = st.number_input(f'Introduce number of snapshots (must be lower than {Tensor.shape[-1]})', max_value = int(Tensor.shape[-1]), value = int(Tensor.shape[-1]), step = 1) - SNAP = int(SNAP) - - decision = st.radio('Represent real data and reconstruction videos', ('Yes', 'No')) - if decision == 'Yes': - decision1 = st.radio('For U, V or both velocities', ('U', 'V', 'Both')) - else: - decision1 = None - - TimePos = int(Tensor.ndim) - - go = st.button('Calculate') - - if go: - with st.spinner('Please wait for the run to complete'): - - hosvd_fun(SNAP, varepsilon1, Tensor, TimePos, path0, decision, decision1) - st.success('Run complete!') - - st.warning(f"All files have been saved to {path0}/HOSVD_solution/tol_{varepsilon1}") - st.info("Press 'Refresh' to run a new case") - Refresh = st.button('Refresh') - if Refresh: - st.stop() \ No newline at end of file diff --git a/v0.1_ModelFLOWs_web/hybCNN_model_page.py b/v0.1_ModelFLOWs_web/hybCNN_model_page.py deleted file mode 100644 index 38030ab..0000000 --- a/v0.1_ModelFLOWs_web/hybCNN_model_page.py +++ /dev/null @@ -1,777 +0,0 @@ -import numpy as np -import pandas as pd -import os -import matplotlib.pyplot as plt -import time -from numpy import linalg as LA -from sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error, r2_score -import tensorflow as tf -from tensorflow import keras -from tensorflow.keras.layers import Dense, Reshape, TimeDistributed, Flatten, Convolution1D -from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau -from tensorflow.keras.models import Model -from tensorflow.keras.layers import Input - -import streamlit as st - - -def menu(): - st.title("Hybrid CNN Model") - st.write(""" -This algorithm proposes a hybrid predictive Reduced Order Model to solve reacting flow problems. -This algorithm is based on a dimensionality reduction using Proper Orthogonal Decomposition (POD) combined with deep learning architectures to predict the temporal coefficients of the POD modes. -The deep learning architecture implemented is: convolutional neural network (CNN). 
- """) - st.write(" ## CNN Model - Parameter Configuration") - - def mean_absolute_percentage_error(y_true, y_pred): - epsilon = 1e-10 - y_true, y_pred = np.array(y_true), np.array(y_pred) - return np.mean(np.abs((y_true - y_pred) / np.maximum(epsilon,np.abs(y_true)))) * 100 - - def smape(A, F): - return ((100.0/len(A)) * np.sum(2 * np.abs(F - A) / (np.abs(A) + np.abs(F))+ np.finfo(float).eps)) - - def RRMSE(Tensor0, Reconst): - RRMSE = np.linalg.norm(np.reshape(Tensor0-Reconst,newshape=(np.size(Tensor0),1)),ord=2)/np.linalg.norm(np.reshape(Tensor0,newshape=(np.size(Tensor0),1))) - return(RRMSE) - - def custom_loss(y_actual,y_pred): - if decision2 == 'no-scaling': - Ten = tf.einsum('ij,kj->ki',tf.convert_to_tensor(U, dtype=tf.float32),y_pred) - Ten_actual = tf.einsum('ij,kj->ki',tf.convert_to_tensor(U, dtype=tf.float32),y_actual) - elif decision2 == 'MaxPerMode': - Ten = tf.einsum('ij,kj->ki',tf.convert_to_tensor(U, dtype=tf.float32),y_pred* (max_val)) - Ten_actual = tf.einsum('ij,kj->ki',tf.convert_to_tensor(U, dtype=tf.float32),y_actual* (max_val)) - elif decision2 == 'auto': - Ten = tf.einsum('ij,kj->ki',tf.convert_to_tensor(U, dtype=tf.float32),y_pred* (std_val)+med_val) - Ten_actual = tf.einsum('ij,kj->ki',tf.convert_to_tensor(U, dtype=tf.float32),y_actual* (std_val)+med_val) - elif decision2 == 'range': - Ten = tf.einsum('ij,kj->ki',tf.convert_to_tensor(U, dtype=tf.float32),y_pred* (range_val)+min_val) - Ten_actual = tf.einsum('ij,kj->ki',tf.convert_to_tensor(U, dtype=tf.float32),y_actual* (range_val)+min_val) - - Ten = tf.keras.layers.Reshape((nx*ny,nv))(Ten) - Ten_actual = tf.keras.layers.Reshape((nx*ny,nv))(Ten_actual) - - output_list = [] - output_list2 = [] - - for iter in range(Ten.shape[2]-1): - variable = Ten[:,:,iter+1] - variable_ac = Ten_actual[:,:,iter+1] - - if decision1 == 'no-scaling': - output_list.append(variable) - output_list2.append(variable_ac) - else: - Med = tf.cast(tf.reshape(Media_tiempo[iter+1,:,:,0],[nx*ny]),tf.float32) - output_list.append(variable*(Factor[iter+1])+Media[iter+1]+Med) - output_list2.append(variable_ac*(Factor[iter+1])+Media[iter+1]+Med) - - Ten2 = tf.stack(output_list) - Ten2_ac = tf.stack(output_list2) - - pred_sum_spec = tf.math.reduce_sum(Ten2,axis=0) - pred_sum_spec = Reshape((nx,ny))(pred_sum_spec) - - ac_sum_spec = tf.math.reduce_sum(Ten2_ac,axis=0) - ac_sum_spec = Reshape((nx,ny))(ac_sum_spec) - - custom_loss = tf.reduce_mean(tf.square(y_actual-y_pred)) + tf.reduce_mean(tf.square(ac_sum_spec - pred_sum_spec)) - - return custom_loss - - path0 = os.getcwd() - - if not os.path.exists(f'{path0}/CNN_solution'): - os.mkdir(f'{path0}/CNN_solution') - - selection = 'Tensor_cylinder_Re100.mat' - - decision1 = st.selectbox("Select the scaling of the variables implemented", ("auto", "pareto", "range", "no-scaling")) - decision2 = st.selectbox("Select the scaling of the temporal coefficients implemented", ("MaxPerMode", "auto", "range", "no-scaling")) - - n_modes = st.number_input('Select the number of modes to retain during SVD', min_value = 0, max_value = None, value = 15, step = 1) - - # Inputs - test_prop = 0.20 # test set proportion - val_prop = 0.20 # validation set proportion val_length = (1-test_prop)*val_prop // train_length = (1-test_prop)*(1-val_prop) - batch_size = st.slider('Select Batch Size', 4, 64, value = 8, step = 2) - k = 10 # number of snapshots used as predictors - p = 6 # number of snapshots used as time-ahead predictions - epoch = st.slider('Select training epochs', 0, 500, value = 200, step=10) - - act_func1 = 
st.selectbox('Select hidden layer activation function', ('relu', 'elu', 'sigmoid', 'softmax', 'tanh')) - act_func2 = st.selectbox('Select output layer activation function', ('tanh', 'relu', 'sigmoid')) - neurons = st.slider('Select number of neurons per layer', 1, 150, value = 30, step = 1) - shared_dim = st.slider('Select number of shared dims', 1, 100, value = 20, step = 1) - lr = st.number_input('Select learning rate', min_value = 0.0001, max_value = 0.01, value = 0.002, step = 0.0001, format = '%.4f') #learning_rate - lf = 'mse' - - go = st.button("Calculate") - - if go: - with st.spinner("Please wait while the model is being trained"): - if not os.path.exists(f'{path0}/hyb_CNN_solution/'): - os.mkdir(f'{path0}/hyb_CNN_solution/') - output1 = 'yes' # Error made by SVD in each variable - output2 = 'yes' # Plot Comparison Coefficients in all indexes - output3 = 'yes' # Figure loss function - output4 = 'yes' # RRMSE of each variable (Original Tensor) - output5 = 'yes' # RRMSE of each variable (Truncated Tensor) - output6 = 'yes' # Plot RRMSE temporal matrix - output7 = 'yes' # Comparison of the modes - output8 = 'yes' # Comparison of the snapshots - - if output7 == 'yes': # Indicate the number of the mode you want to compare - nModes = [1,2,3,4,5] - if output8 == 'yes': # Indicate the number of the variable and time step (nv, nt) - index = np.array([ - [0,1], - [1,1]]) - - # Load dataset - import hdf5storage - - Tensor_ = hdf5storage.loadmat(f'{path0}/{selection}') - data = list(Tensor_.values())[-1] - data = tf.transpose(data, (3, 1, 2, 0)) - - nt, ny, nx, nv = np.shape(data) - - # Define mesh / Load it - x = np.linspace(0, 1, nx) - y = np.linspace(0, 1, ny) - xv, yv = np.meshgrid(y, x) - - # Original tensor reconstruction - Mat_orig = np.reshape(data,[nt,nv*nx*ny]) - Mat_orig = np.transpose(Mat_orig) - Tensor_orig = np.reshape(Mat_orig,[nv,nx,ny,nt], order='F') - sum_spec = np.sum(np.sum(Tensor_orig[1:,:,:,:],axis=3),axis=0)/(nt) - - # Centering and scaling - if decision1 == 'no-scaling': - pass - else: - #Dummy variables - Tensor_orig1 = np.zeros(Tensor_orig.shape) - Factor = np.zeros(data.shape[3]) - Media = np.zeros(data.shape[3]) - Media_tiempo = np.zeros(np.shape(Tensor_orig)) - - for iter in range(nt): - Media_tiempo[:,:,:,iter] = np.mean(Tensor_orig,axis=3) - - Tensor_orig2 = Tensor_orig-Media_tiempo - - for iter in range(data.shape[3]): - variable = Tensor_orig2[iter,:,:,:] - if decision1 == 'range': - Factor[iter] = np.amax(variable)-np.amin(variable) #Range scaling - if decision1 == 'auto': - Factor[iter] = np.std(variable) #Auto scaling - if decision1 == 'pareto': - Factor[iter] = np.sqrt(np.std(variable)) #Pareto scaling - Media[iter] = np.mean(variable) - Tensor_orig1[iter,:,:,:] = (variable-Media[iter])/(Factor[iter]) - - Mat_orig = np.reshape(Tensor_orig1,[nv*nx*ny,nt],order='F') - - # Perform SVD - U, S, V = np.linalg.svd(Mat_orig, full_matrices=False) - - Modes = n_modes - S = np.diag(S) - - U = U[:,0:Modes] - S = S[0:Modes,0:Modes] - V = V[0:Modes,:] - - # AML Matrix construction - AML = np.dot(S,V) - tensor = AML - - if output1 == 'yes': - Mat_trunc = np.dot(U,AML) - Ten_trunc = np.reshape(Mat_trunc,[nv,nx,ny,nt], order='F') - - if decision1 == 'no': - pass - else: - for iter in range(data.shape[3]): - variable = Ten_trunc[iter,:,:,:] - Ten_trunc[iter,:,:,:] = variable*(Factor[iter])+Media[iter]+Media_tiempo[iter,:,:,:] - - if output1 == 'yes': - st.write('\n') - RRMSE_SVD = np.zeros(data.shape[3]) - for iter in range(data.shape[3]): - diff = Ten_trunc[iter,:,:,:] - 
Tensor_orig[iter,:,:,:] - RRMSE_SVD[iter] = LA.norm(np.reshape(diff.flatten(),(-1,1)),ord=2)/LA.norm(np.reshape(Tensor_orig[iter,:,:,:].flatten(),(-1,1)),ord=2) - st.write(f'###### SVD RRMSE error for variable {iter+1}: {np.round(RRMSE_SVD[iter]*100, 3)}%\n') - st.write('\n') - np.save(f'{path0}/hyb_CNN_solution/RRMSE_SVD.npy',RRMSE_SVD) - - # Scaling of temporal coeficients - if decision2 == 'no': - tensor_norm = tensor - elif decision2 == 'range': - min_val = np.amin(tensor) - range_val = np.ptp(tensor) - tensor_norm = (tensor-min_val)/range_val - elif decision2 == 'auto': - med_val = np.mean(tensor) - std_val =np.std(tensor) - tensor_norm = (tensor-med_val)/std_val - elif decision2 == 'MaxPerMode': - max_val = sum(np.amax(np.abs(tensor),axis=1)) - tensor_norm = tensor / max_val - - # Dataset configuration - total_length = tensor_norm.shape[1] # number of snapshots - channels_n = 0 # number of channels, assuming each snapshot is an image with n channels - dim_x = tensor_norm.shape[0] # following the simil that each snapshot is an image, the dimension x of that image - dim_y = 0 # following the simil that each snapshot is an image, the dimension y of that image - - print('total_length: ', total_length) - print('channels_n: ', channels_n) - print('dim_x: ', dim_x) - print('dim_y: ', dim_y) - - # Preparing training and test datasets - import math - class DataGenerator(tf.keras.utils.Sequence): - 'Generates data for Keras' - def __init__(self, data, list_IDs, batch_size=5, dim=(20), - k = 624, p = 1, - shuffle=True, till_end = False, only_test = False): - 'Initialization' - self.data = data - self.dim = dim - self.batch_size = batch_size - self.list_IDs = list_IDs - self.shuffle = shuffle - self.p = p - self.k = k - self.till_end = till_end - self.only_test = only_test - self.on_epoch_end() - - def __len__(self): - 'Denotes the number of batches per epoch' - if self.till_end: - lenx = math.ceil((len(self.list_IDs) / self.batch_size)) - else: - lenx = int(np.floor(len(self.list_IDs) / self.batch_size)) - return lenx - - def __getitem__(self, index): - 'Generate one batch of data' - # Generate indexes of the batch - indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size] - - # Find list of IDs - list_IDs_temp = [self.list_IDs[k] for k in indexes] - - # Generate data - X, y = self.__data_generation(list_IDs_temp) - if self.only_test: - return X - else: - return X, y - - def on_epoch_end(self): - 'Updates indexes after each epoch' - self.indexes = np.arange(len(self.list_IDs)) - if self.shuffle == True: - np.random.shuffle(self.indexes) - - def __data_generation(self, list_IDs_temp): - 'Generates data containing batch_size samples' # X : (n_samples, *dim, depth) - # Initialization - X = np.empty((self.batch_size, self.dim, self.k)) - y = [np.empty((self.batch_size, self.dim))]*self.p - - y_inter = np.empty((self.batch_size, self.dim, p)) - - # Generate data - lenn = len(list_IDs_temp) - for i, ID in enumerate(list_IDs_temp): - # Store Xtrain - X[i,:,:] = self.data[:,ID:ID+k] - # Store Ytrain - y_inter[i,:,:] = self.data[:,ID+k:ID+k+p] - - for j in range(self.p): - y[j] = y_inter[:,:,j] - y[j] = np.reshape(y[j], (lenn, -1)) - X = X.transpose((0,2,1)) - return X, y - - # Prepare the dataset indexes - period_transitorio = 0 - stride_train = 1 - stride_val = 1 - stride_test = 1 - dim=(dim_x) - - test_length = int(test_prop * total_length) - val_length = int((total_length - test_length) * val_prop) - train_length = total_length - val_length - test_length - - if 
int(train_length-period_transitorio-(k+p)) < 0: - train_n = 0 - elif int((train_length-period_transitorio-(k+p))//stride_train) == 0: - train_n = 1 - else: - train_n = int(((train_length-period_transitorio)-(k+p))//stride_train) - - if int(test_length-(k+p)) < 0: - test_n = 0 - elif int((test_length-(k+p))//stride_test) == 0: - test_n = 1 - else: - test_n = int((test_length-(k+p))//stride_test) - - if int(val_length-(k+p)) < 0: - val_n = 0 - elif int((val_length-(k+p))//stride_val) == 0: - val_n = 1 - else: - val_n = int((val_length-(k+p))//stride_val) - - # Indices for the beginnings of each batch - train_idxs = np.empty([train_n], dtype='int') - val_idxs = np.empty([val_n], dtype='int') - test_idxs = np.empty([test_n], dtype='int') - - j = period_transitorio - for i in range(train_n): - train_idxs[i] = j - j = j+stride_train - - j = train_length - for i in range(val_n): - val_idxs[i] = j - j = j+stride_val - - j = train_length + val_length - for i in range(test_n): - test_idxs[i] = j - j = j+stride_test - - # Generators - training_generator = DataGenerator(tensor_norm, train_idxs, - dim = dim, - batch_size = batch_size, - k = k, p = p, till_end = False, - only_test = False, - shuffle = True) - validation_generator = DataGenerator(tensor_norm, val_idxs, - dim = dim, - batch_size = batch_size, - k = k, p = p, till_end = False, - only_test = False, - shuffle = False) - test_generator = DataGenerator(tensor_norm, test_idxs, - dim = dim, - batch_size = batch_size, - k = k, p = p, till_end = False, - only_test = True, - shuffle = False) - if output2 == 'yes': - all_idxs = np.int_(np.linspace(0,nt-k-p,nt-k-p+1)) - all_generator = DataGenerator(tensor_norm, all_idxs, - dim = dim, - batch_size = batch_size, - k = k, p = p, till_end = False, - only_test = True, - shuffle = False) - - print ('test_length: ', test_length) - print ('val_length: ', val_length) - print ('train_length: ', train_length) - print() - print ('test_n: ', test_n) - print ('val_n: ', val_n) - print ('train_n: ', train_n) - print() - print('test_generator_len: ', len(test_generator)) - print('validation_generator_len: ', len(validation_generator)) - print('training_generator_len: ', len(training_generator)) - - # Prepare Ytest - test_n_adjusted = int(test_n/batch_size)*batch_size # multiplo de batch_size - Ytest = [np.empty([test_n_adjusted, dim_x], dtype='float64')] * p - Ytest_fl = [np.empty([test_n_adjusted, dim_x ], dtype='float64')] * p - - Ytest_inter = np.empty([test_n_adjusted, dim_x, p], dtype='float64') - - for i in range(test_n_adjusted): - j = test_idxs[i] - Ytest_inter[i,:,:] = tensor_norm[:,j+k:j+k+p] - - for r in range(p): - Ytest[r] = Ytest_inter[:,:,r] - Ytest_fl[r] = np.copy(np.reshape(Ytest[r], (test_n_adjusted, -1))) - - # Preparing the model - def create_model_cnn(in_shape, out_dim, p = 3, shared_dim = 1000, act_fun= act_func1, act_func2 = act_func2): - x = Input(shape=in_shape) - - v = Convolution1D(30,3)(x) - v = Convolution1D(60,3)(v) - v = Dense(neurons, activation= act_fun)(v) - - tt = [1]*p - - r = TimeDistributed( Dense(shared_dim, activation=act_fun))(v) - s = tf.split(r, tt, 1) - for i in range(p): - s[i] = Flatten()(s[i]) - - o = [] - for i in range(p): - o.append( Dense(out_dim, activation=act_func2)(s[i]) ) - - m = Model(inputs=x, outputs=o) - opt = keras.optimizers.Adam(learning_rate=lr) - m.compile(loss=lf, optimizer=opt, metrics=[lf]) - - return(m) - - # Create the model - in_shape = [k, dim_x] - out_dim = dim_x - - print(in_shape) - print(out_dim) - - model= 
create_model_cnn(in_shape,out_dim,p,shared_dim) - - # save the best weights - save_string = 'colab_Cilin3D_AML_20_CNN v1' - - # save the best weights - save_best_weights = f'{path0}/hyb_CNN_solution/' + save_string + '.h5' - save_summary_stats = f'{path0}/hyb_CNN_solution/' + save_string + '.csv' - save_last_weights = f'{path0}/hyb_CNN_solution/' + save_string + '_last_w.h5' - save_results_metrics = f'{path0}/hyb_CNN_solution/' + save_string + '_results_metrics.csv' - - np.random.seed(247531338) - t0 = time.time() - # Model training - callbacks = [ModelCheckpoint(save_best_weights, monitor='val_loss', save_best_only=True, mode='auto'), - EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='auto', min_delta = 0.001)] - #,ReduceLROnPlateau(monitor='val_loss', factor=0.8,patience=5,min_lr=1e-6)] - - - model.summary(print_fn=lambda x: st.text(x)) - - history = model.fit(training_generator, - validation_data=validation_generator, - epochs=epoch, - verbose=1, - callbacks=callbacks) - - t1 = time.time() - print("Minutes elapsed: %f" % ((t1 - t0) / 60.)) - - model.save_weights(save_last_weights) - st.success('The model has been trained!') - - st.write("### Model Training Results - 1D CNN") - - - if not os.path.exists(f'{path0}/hyb_CNN_solution/plots'): - os.mkdir(f'{path0}/hyb_CNN_solution/plots') - - # Aggregate the summary statistics - summary_stats = pd.DataFrame({'epoch': [ i + 1 for i in history.epoch ], - #'train_acc': history.history['mean_squared_error'], - #'valid_acc': history.history['val_mean_squared_error'], - 'train_loss': history.history['loss'], - 'valid_loss': history.history['val_loss']}) - - summary_stats.to_csv(save_summary_stats) - - if output3 == 'yes': - fig, ax = plt.subplots() - - ax.plot(summary_stats.train_loss, 'b',linewidth = 3) # blue - ax.plot(summary_stats.valid_loss, 'g--',linewidth = 3) # green - ax.set_xlabel('Training epochs',fontsize = 12) - ax.set_ylabel('Loss function',fontsize = 12) - - ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) - - plt.savefig(f'{path0}/hyb_CNN_solution/loss.png') - st.pyplot(fig) - plt.close() - - # Find the min validation loss during the training - min_loss, idx = min((loss, idx) for (idx, loss) in enumerate(history.history['val_loss'])) - print('Minimum val_loss at epoch', '{:d}'.format(idx+1), '=', '{:.6f}'.format(min_loss)) - min_loss = round(min_loss, 4) - - # Inference - t0 = time.time() - - model.load_weights(save_best_weights) - Ytest_hat_fl = model.predict(test_generator, verbose=1) - - t1 = time.time() - - lag = 0 - num_sec = Ytest_hat_fl[0].shape[0] - results_table = pd.DataFrame(index=['MSE','MAE','MAD','R2','SMAPE','RRMSE'],columns=range(num_sec)) - for i in range(num_sec): - results_table.iloc[0,i] = mean_squared_error( Ytest_fl[lag][i,:], Ytest_hat_fl[lag][i,:]) - results_table.iloc[1,i] = mean_absolute_error( Ytest_fl[lag][i,:], Ytest_hat_fl[lag][i,:]) - results_table.iloc[2,i] = median_absolute_error( Ytest_fl[lag][i,:], Ytest_hat_fl[lag][i,:]) - results_table.iloc[3,i] = r2_score( Ytest_fl[lag][i,:], Ytest_hat_fl[lag][i,:]) - results_table.iloc[4,i] = smape( Ytest_fl[lag][i,:], Ytest_hat_fl[lag][i,:]) - results_table.iloc[5,i] = RRMSE( np.reshape(Ytest_fl[lag][i,:],(-1,1)), np.reshape(Ytest_hat_fl[lag][i,:],(-1,1))) - - - results_table_global = pd.DataFrame(index=['MSE','MAE','MAD','R2','SMAPE','RRMSE'],columns=range(p)) - for i in range(p): - results_table_global.iloc[0,i] = mean_squared_error(Ytest_fl[i].flatten(), Ytest_hat_fl[i].flatten()) - results_table_global.iloc[1,i] = 
mean_absolute_error(Ytest_fl[i].flatten(), Ytest_hat_fl[i].flatten()) - results_table_global.iloc[2,i] = median_absolute_error(Ytest_fl[i].flatten(), Ytest_hat_fl[i].flatten()) - results_table_global.iloc[3,i] = r2_score(Ytest_fl[i].flatten(), Ytest_hat_fl[i].flatten()) - results_table_global.iloc[4,i] = smape(Ytest_fl[i].flatten(), Ytest_hat_fl[i].flatten()) - results_table_global.iloc[5,i] = RRMSE( np.reshape(Ytest_fl[i].flatten(),(-1,1)), np.reshape(Ytest_hat_fl[i].flatten(),(-1,1))) - - results_table_global['mean'] = results_table_global.mean(axis=1) - - fig, ax = plt.subplots() - ax.plot(range(num_sec),results_table.iloc[0,:], 'b') # green - ax.set_title("MSE score vs. forecast time index",fontsize = 14) - ax.set_xlabel("time", fontsize = 12) - ax.set_ylabel("MSE",fontsize = 12) - plt.savefig(f'{path0}/hyb_CNN_solution/plots/MSEscore.png') - st.pyplot(fig) - plt.close() - - fig, ax = plt.subplots() - ax.plot(range(num_sec),results_table.iloc[1,:], 'b') # green - ax.set_title("MAE score vs. forecast time index",fontsize = 14) - ax.set_xlabel("time", fontsize = 12) - ax.set_ylabel("MAE",fontsize = 12) - plt.savefig(f'{path0}/hyb_CNN_solution/plots/MAEscore.png') - st.pyplot(fig) - plt.close() - - fig, ax = plt.subplots() - ax.plot(range(num_sec),results_table.iloc[2,:], 'b') # green - ax.set_title("MAD score vs. forecast time index",fontsize = 14) - ax.set_xlabel("time", fontsize = 12) - ax.set_ylabel("MAD",fontsize = 12) - plt.savefig(f'{path0}/hyb_CNN_solution/plots/MADscore.png') - st.pyplot(fig) - plt.close() - - fig, ax = plt.subplots() - ax.plot(range(num_sec),results_table.iloc[3,:], 'b') # green - ax.set_title("R2 score vs. forecast time index",fontsize = 14) - ax.set_xlabel("time", fontsize = 12) - ax.set_ylabel("R2",fontsize = 12) - plt.savefig(f'{path0}/hyb_CNN_solution/plots/R2score.png') - st.pyplot(fig) - plt.close() - - fig, ax = plt.subplots() - ax.plot(range(num_sec),results_table.iloc[4,:], 'b') # green - ax.set_title("SMAPE score vs. forecast time index",fontsize = 14) - ax.set_xlabel("time", fontsize = 12) - ax.set_ylabel("SMAPE",fontsize = 12) - plt.savefig(f'{path0}/hyb_CNN_solution/plots/SMAPEscore.png') - st.pyplot(fig) - plt.close() - - fig, ax = plt.subplots() - ax.plot(range(num_sec),results_table.iloc[5,:], 'b') # green - ax.set_title("RRMSE score vs. 
forecast time index",fontsize = 14) - ax.set_xlabel("time", fontsize = 12) - ax.set_ylabel("RRMSE",fontsize = 12) - plt.savefig(f'{path0}/hyb_CNN_solution/plots/RRMSEscore.png') - st.pyplot(fig) - plt.close() - - # Fill the output arrays - Ytest_hat_fl = model.predict(test_generator, verbose=1) - - # Create the multidimensional arrays for the results and ground-truth values - mat_pred = np.zeros((Ytest_fl[0].shape[0],p,Ytest_fl[0].shape[1])) - print(mat_pred.shape) - - # Fill the output arrays - for i in range(p): - for j in range(Ytest_fl[0].shape[0]): - mat_pred[j,i,:]=Ytest_hat_fl[i][j,:] - - if decision2 == 'no': - pass - elif decision2 == 'auto': - mat_pred = mat_pred * std_val + med_val - elif decision2 == 'range': - mat_pred = mat_pred * range_val + min_val - elif decision2 == 'MaxPerMode': - mat_pred = mat_pred * max_val - - if output2 == 'yes': - Ytest_all = model.predict(all_generator, verbose=1) - mat_all = np.zeros((Ytest_all[0].shape[0],p,Ytest_all[0].shape[1])) - - for i in range(p): - for j in range(Ytest_all[0].shape[0]): - mat_all[j,i,:]=Ytest_all[i][j,:] - - if decision2 == 'no': - pass - elif decision2 == 'auto': - mat_all = mat_all * std_val + med_val - elif decision2 == 'range': - mat_all = mat_all * range_val + min_val - elif decision2 == 'MaxPerMode': - mat_all = mat_all * max_val - - new_dim = [mat_pred.shape[0],mat_pred.shape[2]] - - AML_pre_LSTM = np.transpose(np.squeeze(mat_pred[:,0,:])) - num_snap = AML_pre_LSTM.shape[1] - - mat_time_slice_index = np.zeros((Ytest_fl[0].shape[0],p),dtype=int) - for i in range(p): - for j in range(Ytest_fl[0].shape[0]): - mat_time_slice_index[j,i]=test_idxs[j]+k+i - - time_lag = mat_time_slice_index[0,0] - - # Matrix reconstruction - - Mat_pre = np.dot(U,AML_pre_LSTM) - - # Tensor reconstruction - Ten_pre = np.reshape(Mat_pre, [nv,nx,ny,AML_pre_LSTM.shape[1]], order='F') - - for iter in range(data.shape[3]): - variable = Ten_pre[iter,:,:,:] - Ten_pre[iter,:,:,:] = variable*(Factor[iter])+Media[iter]+Media_tiempo[iter,:,:,:Ten_pre.shape[3]] - - if output2 == 'yes': - AML_all_LSTM = np.transpose(np.squeeze(mat_all[:,0,:])) - - - # RRMSE Measure with Original Tensor - if output4 == 'yes': - RRMSE_orNN = np.zeros(Tensor_orig.shape[0]) - for iter in range(Tensor_orig1.shape[0]): - - diff = Ten_pre[iter,:,:,:] - Tensor_orig[iter,:,:,time_lag:time_lag+num_snap] - RRMSE_orNN[iter] = LA.norm(np.reshape(diff.flatten(),(-1,1)),ord=2)/LA.norm(np.reshape(Tensor_orig[iter,:,:,time_lag:time_lag+num_snap].flatten(),(-1,1)),ord=2) - - st.write(f'###### RRMSE measure with original tensor - variable {iter+1}: {np.round(RRMSE_orNN[iter]*100, 3)}%\n') - - st.write('\n') - np.save(f"{path0}/hyb_CNN_solution/RRMSE_orNN.npy", RRMSE_orNN) - - # RRMSE with Truncated - if output5 == 'yes': - RRMSE_trNN = np.zeros(Tensor_orig.shape[0]) - for iter in range(Tensor_orig1.shape[0]): - - diff = Ten_pre[iter,:,:,:] - Ten_trunc[iter,:,:,time_lag:time_lag+num_snap] - RRMSE_trNN[iter] = LA.norm(np.reshape(diff.flatten(),(-1,1)),ord=2)/LA.norm(np.reshape(Ten_trunc[iter,:,:,time_lag:time_lag+num_snap].flatten(),(-1,1)),ord=2) - - st.write(f'###### RRMSE measure with truncated tensor - variable {iter+1}: {np.round(RRMSE_trNN[iter]*100, 3)}%\n') - - st.write('\n') - np.save(f"{path0}/hyb_CNN_solution/RRMSE_trNN.npy", RRMSE_trNN) - - # RRMSE Measure - if output6 == 'yes': - diff = AML_pre_LSTM - AML[:,time_lag:time_lag+num_snap] - globalRRMSE = LA.norm(np.reshape(diff.flatten(),(-1,1)),ord=2)/LA.norm(np.reshape(AML[:,time_lag:time_lag+num_snap].flatten(),(-1,1)),ord=2) - 
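Every error figure reported on this page uses the same relative measure (RRMSE): the 2-norm of the flattened difference divided by the 2-norm of the flattened reference. A minimal reference implementation of that formula:
```
import numpy as np

def rrmse(reference, approximation):
    # ||X - X_hat||_2 / ||X||_2, with both arrays flattened to vectors
    diff = np.ravel(reference - approximation)
    return np.linalg.norm(diff, 2) / np.linalg.norm(np.ravel(reference), 2)
```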
st.write(f'###### Global RRMSE measure: {np.round(globalRRMSE*100, 3)}%\n') - st.write('\n') - rrmse_arr = np.zeros(num_snap) - for i in range(num_snap): - diff = AML[:,time_lag+i]-AML_pre_LSTM[:,i] - rrmse_arr[i] = LA.norm(diff.flatten(),2)/LA.norm(AML[:,time_lag+i].flatten(),2) - - fig, ax = plt.subplots() - ax.plot(rrmse_arr,'k*-') - ax.set_xlabel('Time') - ax.set_ylabel('RRMSE') - plt.savefig(f'{path0}/hyb_CNN_solution/plots/rrmse.png') - st.pyplot(fig) - plt.close() - - # AML comparison - if output7 == 'yes': - if not os.path.exists(f'{path0}/hyb_CNN_solution/Mode_Comparison'): - os.mkdir(f"{path0}/hyb_CNN_solution/Mode_Comparison") - - AML_pre_1 = np.zeros([AML.shape[0],time_lag]) - AML_pre_1 = np.concatenate((AML_pre_1,AML_pre_LSTM),axis=1) - - for i in nModes: - fig, ax = plt.subplots() - ax.plot(AML[i-1,time_lag:],'k*-') - ax.plot(AML_pre_1[i-1,time_lag:],'m*-') - ax.set_xlabel("Time", fontsize=12) - ax.set_title(f"Comparison between AML and AMLPRE at mode {i}", fontsize=14) - plt.savefig(f'{path0}/hyb_CNN_solution/Mode_Comparison/aMLComparison_m_{i}_test.png') - st.pyplot(fig) - plt.close() - if output2 == 'yes': - fig, ax = plt.subplots() - ax.plot(AML[i-1,:nt-k-p+1],'k*-', label='Original') - ax.plot(k+all_idxs[:train_idxs[-1]],AML_all_LSTM[i-1,:train_idxs[-1]],'b*-', label='Training') - ax.plot(k+all_idxs[1+train_idxs[-1]:val_idxs[-1]],AML_all_LSTM[i-1,1+train_idxs[-1]:val_idxs[-1]],'r*-', label='Validation') - ax.plot(k+all_idxs[1+val_idxs[-1]:all_idxs[-1]],AML_all_LSTM[i-1,1+val_idxs[-1]:all_idxs[-1]],'g*-', label='Test') - ax.set_xlabel("Time", fontsize=12) - ax.legend() - ax.set_title(f"Mode {i}", fontsize=14) - namefig1 = f'{path0}/hyb_CNN_solution/Mode_Comparison/aMLComparison_m_{i}_all.png' - plt.savefig(namefig1) - st.pyplot(fig) - plt.close() - - - # Tensor comparison - if output8 == 'yes': - if not os.path.exists(f'{path0}/hyb_CNN_solution/Snapshots'): - os.mkdir(f"{path0}/hyb_CNN_solution/Snapshots") - - for i in range(index.shape[0]): - namefig_orig1 = f'{path0}/hyb_CNN_solution/Snapshots/snap_comp{index[i,0]+1}_t{index[i,1]+1}.png' - fig, ax = plt.subplots() - - ax.contourf(xv,yv,Tensor_orig[int(index[i,0]),:,:,time_lag+int(index[i,1])],100,cmap='jet', - vmin = np.amin(Tensor_orig[int(index[i,0]),:,:,time_lag+int(index[i,1])]), - vmax = np.amax(Tensor_orig[int(index[i,0]),:,:,time_lag+int(index[i,1])])) - - ax.contourf(-xv,yv,Ten_pre[int(index[i,0]),:,:,int(index[i,1])],100,cmap='jet', - vmin = np.amin(Tensor_orig[int(index[i,0]),:,:,time_lag+int(index[i,1])]), - vmax = np.amax(Tensor_orig[int(index[i,0]),:,:,time_lag+int(index[i,1])])) - - ax.set_title(f'Prediction vs. Original Data - Comp. 
{int(index[i,0])+1} Snapshot {int(index[i,1])+1}', fontsize=14) - - ax = plt.gca() - ax.set_aspect(1) - props = dict(boxstyle='round', facecolor='white', alpha=1) - ax.annotate('', xy=(0.5, -0.005), xycoords='axes fraction', xytext=(0.5, 1.005), - arrowprops=dict(arrowstyle='-', lw = 3, color='k')) - ax.axis('off') - plt.savefig(namefig_orig1) - st.pyplot(fig) - plt.close() - st.info("Press 'Refresh' to run a new case") - Refresh = st.button('Refresh') - if Refresh: - st.stop() diff --git a/v0.1_ModelFLOWs_web/hybRNN_model_page.py b/v0.1_ModelFLOWs_web/hybRNN_model_page.py deleted file mode 100644 index 51512be..0000000 --- a/v0.1_ModelFLOWs_web/hybRNN_model_page.py +++ /dev/null @@ -1,803 +0,0 @@ -import numpy as np -import pandas as pd -import os -import matplotlib.pyplot as plt -import time -import hdf5storage - - -from numpy import linalg as LA - -from sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error, r2_score - -import tensorflow as tf -from tensorflow import keras -from tensorflow.keras.layers import Dense, Reshape, TimeDistributed, Flatten -from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau -from tensorflow.keras.models import Model -from tensorflow.keras.layers import Input, LSTM - - - -import streamlit as st - -pd.set_option('display.max_columns',100) -pd.set_option('display.max_rows',100) - -def menu(): - st.title("Hybrid RNN Model") - st.write(""" -This algorithm proposes a hybrid predictive Reduced Order Model to solve reacting flow problems. -It is based on a dimensionality reduction using Proper Orthogonal Decomposition (POD) combined with deep learning architectures to predict the temporal coefficients of the POD modes. -The deep learning architecture implemented is a recurrent neural network (RNN). 
-""") - st.write(" ## RNN Model - Parameter Configuration") - - def mean_absolute_percentage_error(y_true, y_pred): - epsilon = 1e-10 - y_true, y_pred = np.array(y_true), np.array(y_pred) - return np.mean(np.abs((y_true - y_pred) / np.maximum(epsilon,np.abs(y_true)))) * 100 - - def smape(A, F): - return ((100.0/len(A)) * np.sum(2 * np.abs(F - A) / (np.abs(A) + np.abs(F))+ np.finfo(float).eps)) - - def RRMSE(Tensor0, Reconst): - RRMSE = np.linalg.norm(np.reshape(Tensor0-Reconst,newshape=(np.size(Tensor0),1)),ord=2)/np.linalg.norm(np.reshape(Tensor0,newshape=(np.size(Tensor0),1))) - return(RRMSE) - - def custom_loss(y_actual,y_pred): - if decision2 == 'no-scaling': - Ten = tf.einsum('ij,kj->ki',tf.convert_to_tensor(U, dtype=tf.float32),y_pred) - Ten_actual = tf.einsum('ij,kj->ki',tf.convert_to_tensor(U, dtype=tf.float32),y_actual) - elif decision2 == 'MaxPerMode': - Ten = tf.einsum('ij,kj->ki',tf.convert_to_tensor(U, dtype=tf.float32),y_pred* (max_val)) - Ten_actual = tf.einsum('ij,kj->ki',tf.convert_to_tensor(U, dtype=tf.float32),y_actual* (max_val)) - elif decision2 == 'auto': - Ten = tf.einsum('ij,kj->ki',tf.convert_to_tensor(U, dtype=tf.float32),y_pred* (std_val)+med_val) - Ten_actual = tf.einsum('ij,kj->ki',tf.convert_to_tensor(U, dtype=tf.float32),y_actual* (std_val)+med_val) - elif decision2 == 'range': - Ten = tf.einsum('ij,kj->ki',tf.convert_to_tensor(U, dtype=tf.float32),y_pred* (range_val)+min_val) - Ten_actual = tf.einsum('ij,kj->ki',tf.convert_to_tensor(U, dtype=tf.float32),y_actual* (range_val)+min_val) - - Ten = tf.keras.layers.Reshape((nx*ny,nv))(Ten) - Ten_actual = tf.keras.layers.Reshape((nx*ny,nv))(Ten_actual) - - output_list = [] - output_list2 = [] - - for iter in range(Ten.shape[2]-1): - variable = Ten[:,:,iter+1] - variable_ac = Ten_actual[:,:,iter+1] - - if decision1 == 'no-scaling': - output_list.append(variable) - output_list2.append(variable_ac) - else: - Med = tf.cast(tf.reshape(Media_tiempo[iter+1,:,:,0],[nx*ny]),tf.float32) - output_list.append(variable*(Factor[iter+1])+Media[iter+1]+Med) - output_list2.append(variable_ac*(Factor[iter+1])+Media[iter+1]+Med) - - Ten2 = tf.stack(output_list) - Ten2_ac = tf.stack(output_list2) - - pred_sum_spec = tf.math.reduce_sum(Ten2,axis=0) - pred_sum_spec = Reshape((nx,ny))(pred_sum_spec) - - ac_sum_spec = tf.math.reduce_sum(Ten2_ac,axis=0) - ac_sum_spec = Reshape((nx,ny))(ac_sum_spec) - - custom_loss = tf.reduce_mean(tf.square(y_actual-y_pred)) + tf.reduce_mean(tf.square(ac_sum_spec - pred_sum_spec)) - - return custom_loss - - path0 = os.getcwd() - - if not os.path.exists(f'{path0}/RNN_solution'): - os.mkdir(f'{path0}/RNN_solution') - - n_modes = 18 - - selection = 'Tensor_cylinder_Re100.mat' - - decision1 = st.selectbox("Select the scaling of the variables implemented", ("auto", "pareto", "range", "no-scaling")) - decision2 = st.selectbox("Select the scaling of the temporal coefficients implemented", ("MaxPerMode", "auto", "range", "no-scaling")) - - n_modes = st.number_input('Select the number of modes to retain during SVD', min_value = 0, max_value = None, value = 15, step = 1) - - # Inputs - test_prop = 0.20 # test set proportion - val_prop = 0.25 # validation set proportion val_length = (1-test_prop)*val_prop // train_length = (1-test_prop)*(1-val_prop) - batch_size = st.slider('Select Batch Size', 4, 64, value = 8, step = 2) - k = 10 # number of snapshots used as predictors - p = 6 # number of snapshots used as time-ahead predictions - epoch = st.slider('Select training epochs', 0, 500, value = 200, step=10) - - 
act_func1 = st.selectbox('Select hidden layer activation function', ('relu', 'elu', 'sigmoid', 'softmax', 'tanh')) - act_func2 = st.selectbox('Select output layer activation function', ('tanh', 'relu', 'sigmoid')) - neurons = st.slider('Select number of neurons per layer', 1, 150, value = 30, step = 1) - shared_dim = st.slider('Select number of shared dims', 1, 100, value = 20, step = 1) - lr = st.number_input('Select learning rate', min_value = 0.0001, max_value = 0.01, value = 0.002, step = 0.0001, format = '%.4f') #learning_rate - lf = 'mse' - - go = st.button("Calculate") - - if go: - with st.spinner("Please wait while the model is being trained"): - if not os.path.exists(f'{path0}/hyb_RNN_solution/'): - os.mkdir(f'{path0}/hyb_RNN_solution/') - output1 = 'yes' # Error made by SVD in each variable - output2 = 'yes' # Plot Comparison Coefficients in all indexes - output3 = 'yes' # Figure loss function - output4 = 'yes' # RRMSE of each variable (Original Tensor) - output5 = 'yes' # RRMSE of each variable (Truncated Tensor) - output6 = 'yes' # Plot RRMSE temporal matrix - output7 = 'yes' # Comparison of the modes - output8 = 'yes' # Comparison of the snapshots - - if output7 == 'yes': # Indicate the number of the mode you want to compare - nModes = [1,2,3,4,5] - if output8 == 'yes': # Indicate the number of the variable and time step (nv, nt) - index = np.array([ - [0,1], - [1,1]]) - - Tensor_ = hdf5storage.loadmat(f'{path0}/{selection}') - data = list(Tensor_.values())[-1] - data = tf.transpose(data, (3, 1, 2, 0)) - - nt, ny, nx, nv = np.shape(data) - - x = np.linspace(0, 1, nx) - y = np.linspace(0, 1, ny) - xv, yv = np.meshgrid(y, x) - - # Original Tensor Reconstruction - Mat_orig = np.reshape(data,[nt,nv*nx*ny]) - Mat_orig = np.transpose(Mat_orig) - Tensor_orig = np.reshape(Mat_orig,[nv,nx,ny,nt], order='F') - sum_spec = np.sum(np.sum(Tensor_orig[1:,:,:,:],axis=3),axis=0)/(nt) - - # Centering and Scaling - if decision1 == 'no-scaling': - pass - else: - #Dummy variables - Tensor_orig1 = np.zeros(Tensor_orig.shape) - Factor = np.zeros(data.shape[3]) - Media = np.zeros(data.shape[3]) - Media_tiempo = np.zeros(np.shape(Tensor_orig)) - - for iter in range(nt): - Media_tiempo[:,:,:,iter] = np.mean(Tensor_orig,axis=3) - - Tensor_orig2 = Tensor_orig-Media_tiempo - - for iter in range(data.shape[3]): - variable = Tensor_orig2[iter,:,:,:] - if decision1 == 'range': - Factor[iter] = np.amax(variable)-np.amin(variable) #Range scaling - if decision1 == 'auto': - Factor[iter] = np.std(variable) #Auto scaling - if decision1 == 'pareto': - Factor[iter] = np.sqrt(np.std(variable)) #Pareto scaling - Media[iter] = np.mean(variable) - Tensor_orig1[iter,:,:,:] = (variable-Media[iter])/(Factor[iter]) - - Mat_orig = np.reshape(Tensor_orig1,[nv*nx*ny,nt],order='F') - - # Perform SVD - U, S, V = np.linalg.svd(Mat_orig, full_matrices=False) - - Modes = n_modes - S = np.diag(S) - - U = U[:,0:Modes] - S = S[0:Modes,0:Modes] - V = V[0:Modes,:] - - AML = np.dot(S,V) - tensor = AML - - if output1 == 'yes': - Mat_trunc = np.dot(U,AML) - Ten_trunc = np.reshape(Mat_trunc,[nv,nx,ny,nt], order='F') - - if decision1 == 'no-scaling': - pass - else: - for iter in range(data.shape[3]): - variable = Ten_trunc[iter,:,:,:] - Ten_trunc[iter,:,:,:] = variable*(Factor[iter])+Media[iter]+Media_tiempo[iter,:,:,:] - - if output1 == 'yes': - st.write('\n') - RRMSE_SVD = np.zeros(data.shape[3]) - for iter in range(data.shape[3]): - diff = Ten_trunc[iter,:,:,:] - Tensor_orig[iter,:,:,:] - RRMSE_SVD[iter] = 
LA.norm(np.reshape(diff.flatten(),(-1,1)),ord=2)/LA.norm(np.reshape(Tensor_orig[iter,:,:,:].flatten(),(-1,1)),ord=2) - st.write(f'###### SVD RRMSE error for variable {iter+1}: {np.round(RRMSE_SVD[iter]*100, 3)}%\n') - st.write('\n') - np.save(f'{path0}/hyb_RNN_solution/RRMSE_SVD.npy',RRMSE_SVD) - - if decision2 == 'no-scaling': - tensor_norm = tensor - elif decision2 == 'range': - min_val = np.amin(tensor) - range_val = np.ptp(tensor) - tensor_norm = (tensor-min_val)/range_val - elif decision2 == 'auto': - med_val = np.mean(tensor) - std_val =np.std(tensor) - tensor_norm = (tensor-med_val)/std_val - elif decision2 == 'MaxPerMode': - max_val = sum(np.amax(np.abs(tensor),axis=1)) - tensor_norm = tensor / max_val - - # Dataset configuration - total_length = tensor_norm.shape[1] # number of snapshots - channels_n = 0 # number of channels, assuming each snapshot is an image with n channels - dim_x = tensor_norm.shape[0] # following the simil that each snapshot is an image, the dimension x of that image - dim_y = 0 # following the simil that each snapshot is an image, the dimension y of that image - - print('total_length: ', total_length) - print('channels_n: ', channels_n) - print('dim_x: ', dim_x) - print('dim_y: ', dim_y) - - # Data generator - import math - class DataGenerator(tf.keras.utils.Sequence): - 'Generates data for Keras' - def __init__(self, data, list_IDs, batch_size=5, dim=(20), - k = 624, p = 1, - shuffle=True, till_end = False, only_test = False): - 'Initialization' - self.data = data - self.dim = dim - self.batch_size = batch_size - self.list_IDs = list_IDs - self.shuffle = shuffle - self.p = p - self.k = k - self.till_end = till_end - self.only_test = only_test - self.on_epoch_end() - - def __len__(self): - 'Denotes the number of batches per epoch' - if self.till_end: - lenx = math.ceil((len(self.list_IDs) / self.batch_size)) - else: - lenx = int(np.floor(len(self.list_IDs) / self.batch_size)) - return lenx - - def __getitem__(self, index): - 'Generate one batch of data' - # Generate indexes of the batch - indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size] - - # Find list of IDs - list_IDs_temp = [self.list_IDs[k] for k in indexes] - - # Generate data - X, y = self.__data_generation(list_IDs_temp) - if self.only_test: - return X - else: - return X, y - - def on_epoch_end(self): - 'Updates indexes after each epoch' - self.indexes = np.arange(len(self.list_IDs)) - if self.shuffle == True: - np.random.shuffle(self.indexes) - - def __data_generation(self, list_IDs_temp): - 'Generates data containing batch_size samples' # X : (n_samples, *dim, depth) - # Initialization - X = np.empty((self.batch_size, self.dim, self.k)) - y = [np.empty((self.batch_size, self.dim))]*self.p - - y_inter = np.empty((self.batch_size, self.dim, p)) - - # Generate data - lenn = len(list_IDs_temp) - for i, ID in enumerate(list_IDs_temp): - # Store Xtrain - X[i,:,:] = self.data[:,ID:ID+k] - # Store Ytrain - y_inter[i,:,:] = self.data[:,ID+k:ID+k+p] - - for j in range(self.p): - y[j] = y_inter[:,:,j] - y[j] = np.reshape(y[j], (lenn, -1)) - - X = X.transpose((0,2,1)) - - return X, y - - - # Prepare the dataset indexes - period_transitorio = 0 - stride_train = 1 - stride_val = 1 - stride_test = 1 - - dim=(dim_x) - - test_length = int(test_prop * total_length) - val_length = int((total_length - test_length) * val_prop) - train_length = total_length - val_length - test_length - - if int(train_length-period_transitorio-(k+p)) < 0: - train_n = 0 - elif 
int((train_length-period_transitorio-(k+p))//stride_train) == 0: - train_n = 1 - else: - train_n = int(((train_length-period_transitorio)-(k+p))//stride_train) - - if int(test_length-(k+p)) < 0: - test_n = 0 - elif int((test_length-(k+p))//stride_test) == 0: - test_n = 1 - else: - test_n = int((test_length-(k+p))//stride_test) - - if int(val_length-(k+p)) < 0: - val_n = 0 - elif int((val_length-(k+p))//stride_val) == 0: - val_n = 1 - else: - val_n = int((val_length-(k+p))//stride_val) - - # Indices for the beginnings of each batch - train_idxs = np.empty([train_n], dtype='int') - val_idxs = np.empty([val_n], dtype='int') - test_idxs = np.empty([test_n], dtype='int') - - j = period_transitorio - for i in range(train_n): - train_idxs[i] = j - j = j+stride_train - - j = train_length - for i in range(val_n): - val_idxs[i] = j - j = j+stride_val - - j = train_length + val_length - for i in range(test_n): - test_idxs[i] = j - j = j+stride_test - - - - # Generators - training_generator = DataGenerator(tensor_norm, train_idxs, - dim = dim, - batch_size = batch_size, - k = k, p = p, till_end = False, - only_test = False, - shuffle = True) - validation_generator = DataGenerator(tensor_norm, val_idxs, - dim = dim, - batch_size = batch_size, - k = k, p = p, till_end = False, - only_test = False, - shuffle = False) - test_generator = DataGenerator(tensor_norm, test_idxs, - dim = dim, - batch_size = batch_size, - k = k, p = p, till_end = False, - only_test = True, - shuffle = False) - if output2 == 'yes': - all_idxs = np.int_(np.linspace(0,nt-k-p,nt-k-p+1)) - all_generator = DataGenerator(tensor_norm, all_idxs, - dim = dim, - batch_size = batch_size, - k = k, p = p, till_end = False, - only_test = True, - shuffle = False) - - print ('test_length: ', test_length) - print ('val_length: ', val_length) - print ('train_length: ', train_length) - print() - print ('test_n: ', test_n) - print ('val_n: ', val_n) - print ('train_n: ', train_n) - print() - print('test_generator_len: ', len(test_generator)) - print('validation_generator_len: ', len(validation_generator)) - print('training_generator_len: ', len(training_generator)) - - # Prepare Ytest - test_n_adjusted = int(test_n/batch_size)*batch_size # multiplo de batch_size - Ytest = [np.empty([test_n_adjusted, dim_x], dtype='float64')] * p - Ytest_fl = [np.empty([test_n_adjusted, dim_x ], dtype='float64')] * p - - Ytest_inter = np.empty([test_n_adjusted, dim_x, p], dtype='float64') - - for i in range(test_n_adjusted): - j = test_idxs[i] - Ytest_inter[i,:,:] = tensor_norm[:,j+k:j+k+p] - - for r in range(p): - Ytest[r] = Ytest_inter[:,:,r] - Ytest_fl[r] = np.copy(np.reshape(Ytest[r], (test_n_adjusted, -1)) ) - - - # LSTM MODEL 100 NODES - # Build Model - def create_model(in_shape, out_dim, p = 3, shared_dim = 1000, act_fun= act_func1): - x = Input(shape=in_shape) - - - v = LSTM(neurons)(x) - - v = Dense(p*neurons, activation= act_fun)(v) - - v = Reshape((p,neurons))(v) - tt = [1]*p - - r = TimeDistributed( Dense(shared_dim, activation=act_fun))(v) - s = tf.split(r, tt, 1) - for i in range(p): - s[i] = Flatten()(s[i]) - - o = [] - for i in range(p): - o.append( Dense(out_dim, activation=act_func2)(s[i]) ) - - m = Model(inputs=x, outputs=o) - opt = keras.optimizers.Adam(learning_rate=lr) - m.compile(loss=lf, optimizer=opt, metrics=[lf]) - return(m) - - #create the model - - in_shape = [k, dim_x] - out_dim = dim_x - - print(in_shape) - print(out_dim) - - model= create_model(in_shape,out_dim,p,shared_dim) - - - # save the best weights - save_string = 
'colab_Cilin3D_AML_20_LSTM_100 v1' - - # save the best weights - save_best_weights = f'{path0}/hyb_RNN_solution/' + save_string + '.h5' - save_summary_stats = f'{path0}/hyb_RNN_solution/' + save_string + '.csv' - save_last_weights = f'{path0}/hyb_RNN_solution/' + save_string + '_last_w.h5' - save_results_metrics = f'{path0}/hyb_RNN_solution/' + save_string + '_results_metrics.csv' - - - # Training - np.random.seed(247531338) - t0 = time.time() - # Model training - callbacks = [ModelCheckpoint(save_best_weights, monitor='val_loss', save_best_only=True, mode='auto'), - EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='auto', min_delta = 0.001)] - #,ReduceLROnPlateau(monitor='val_loss', factor=0.8,patience=5,min_lr=1e-6)] - - model.summary(print_fn=lambda x: st.text(x)) - - history = model.fit(training_generator, - validation_data=validation_generator, - epochs=epoch, - verbose=1, - callbacks=callbacks) - - t1 = time.time() - print("Minutes elapsed: %f" % ((t1 - t0) / 60.)) - - model.save_weights(save_last_weights) - st.success('The model has been trained!') - - st.write("### Model Training Results - RNN") - - # Aggregate the summary statistics - summary_stats = pd.DataFrame({'epoch': [ i + 1 for i in history.epoch ], - #'train_acc': history.history['mean_squared_error'], - #'valid_acc': history.history['val_mean_squared_error'], - 'train_loss': history.history['loss'], - 'valid_loss': history.history['val_loss']}) - - summary_stats.to_csv(save_summary_stats) - - if not os.path.exists(f'{path0}/hyb_RNN_solution/plots'): - os.mkdir(f'{path0}/hyb_RNN_solution/plots') - - - if output3 == 'yes': - fig, ax = plt.subplots() - - ax.plot(summary_stats.train_loss, 'b',linewidth = 3) # blue - ax.plot(summary_stats.valid_loss, 'g--',linewidth = 3) # green - ax.set_xlabel('Training epochs',fontsize = 12) - ax.set_ylabel('Loss function',fontsize = 12) - - ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) - - plt.savefig(f'{path0}/hyb_RNN_solution/loss.png') - st.pyplot(fig) - plt.close() - - # Find the min validation loss during the training - min_loss, idx = min((loss, idx) for (idx, loss) in enumerate(history.history['val_loss'])) - print('Minimum val_loss at epoch', '{:d}'.format(idx+1), '=', '{:.6f}'.format(min_loss)) - min_loss = round(min_loss, 4) - - # Inference - t0 = time.time() - - model.load_weights(save_best_weights) - Ytest_hat_fl = model.predict(test_generator, verbose=1) - - t1 = time.time() - print("Minutes elapsed: %f" % ((t1 - t0) / 60.)) - - print('Performance measures on Test data, per sec') - lag = 0 - num_sec = Ytest_hat_fl[0].shape[0] - print(num_sec) - results_table = pd.DataFrame(index=['MSE','MAE','MAD','R2','SMAPE','RRMSE'],columns=range(num_sec)) - for i in range(num_sec): - results_table.iloc[0,i] = mean_squared_error( Ytest_fl[lag][i,:], Ytest_hat_fl[lag][i,:]) - results_table.iloc[1,i] = mean_absolute_error( Ytest_fl[lag][i,:], Ytest_hat_fl[lag][i,:]) - results_table.iloc[2,i] = median_absolute_error( Ytest_fl[lag][i,:], Ytest_hat_fl[lag][i,:]) - results_table.iloc[3,i] = r2_score( Ytest_fl[lag][i,:], Ytest_hat_fl[lag][i,:]) - results_table.iloc[4,i] = smape( Ytest_fl[lag][i,:], Ytest_hat_fl[lag][i,:]) - results_table.iloc[5,i] = RRMSE( np.reshape(Ytest_fl[lag][i,:],(-1,1)), np.reshape(Ytest_hat_fl[lag][i,:],(-1,1))) - results_table - - print('Performance measures on Test data, for all time, per time-ahead lag') - - results_table_global = pd.DataFrame(index=['MSE','MAE','MAD','R2','SMAPE','RRMSE'],columns=range(p)) - for i in range(p): - 
results_table_global.iloc[0,i] = mean_squared_error(Ytest_fl[i].flatten(), Ytest_hat_fl[i].flatten()) - results_table_global.iloc[1,i] = mean_absolute_error(Ytest_fl[i].flatten(), Ytest_hat_fl[i].flatten()) - results_table_global.iloc[2,i] = median_absolute_error(Ytest_fl[i].flatten(), Ytest_hat_fl[i].flatten()) - results_table_global.iloc[3,i] = r2_score(Ytest_fl[i].flatten(), Ytest_hat_fl[i].flatten()) - results_table_global.iloc[4,i] = smape(Ytest_fl[i].flatten(), Ytest_hat_fl[i].flatten()) - results_table_global.iloc[5,i] = RRMSE( np.reshape(Ytest_fl[i].flatten(),(-1,1)), np.reshape(Ytest_hat_fl[i].flatten(),(-1,1))) - - results_table_global['mean'] = results_table_global.mean(axis=1) - results_table_global - - fig, ax = plt.subplots() - ax.plot(range(num_sec),results_table.iloc[0,:], 'b') # green - ax.set_title("MSE score vs. forecast time index",fontsize = 14) - ax.set_xlabel("time", fontsize = 12) - ax.set_ylabel("MSE",fontsize = 12) - plt.savefig(f'{path0}/hyb_RNN_solution/plots/MSEscore.png') - st.pyplot(fig) - plt.close() - - fig, ax = plt.subplots() - ax.plot(range(num_sec),results_table.iloc[1,:], 'b') # green - ax.set_title("MAE score vs. forecast time index",fontsize = 14) - ax.set_xlabel("time", fontsize = 12) - ax.set_ylabel("MAE",fontsize = 12) - plt.savefig(f'{path0}/hyb_RNN_solution/plots/MAEscore.png') - st.pyplot(fig) - plt.close() - - fig, ax = plt.subplots() - ax.plot(range(num_sec),results_table.iloc[2,:], 'b') # green - ax.set_title("MAD score vs. forecast time index",fontsize = 14) - ax.set_xlabel("time", fontsize = 12) - ax.set_ylabel("MAD",fontsize = 12) - plt.savefig(f'{path0}/hyb_RNN_solution/plots/MADscore.png') - st.pyplot(fig) - plt.close() - - fig, ax = plt.subplots() - ax.plot(range(num_sec),results_table.iloc[3,:], 'b') # green - ax.set_title("R2 score vs. forecast time index",fontsize=14) - ax.set_xlabel("time", fontsize=12) - ax.set_ylabel("R2",fontsize=12) - plt.savefig(f'{path0}/hyb_RNN_solution/plots/R2score.png') - st.pyplot(fig) - plt.close() - - fig, ax = plt.subplots() - ax.plot(range(num_sec),results_table.iloc[4,:], 'b') # green - ax.set_title("SMAPE score vs. forecast time index",fontsize=14) - ax.set_xlabel("time", fontsize=12) - ax.set_ylabel("SMAPE",fontsize=12) - plt.savefig(f'{path0}/hyb_RNN_solution/plots/SMAPEscore.png') - st.pyplot(fig) - plt.close() - - fig, ax = plt.subplots() - ax.plot(range(num_sec),results_table.iloc[5,:], 'b') # green - ax.set_title("RRMSE score vs. 
forecast time index",fontsize=14) - ax.set_xlabel("time", fontsize=12) - ax.set_ylabel("RRMSE",fontsize=12) - plt.savefig(f'{path0}/hyb_RNN_solution/plots/RRMSEscore.png') - st.pyplot(fig) - plt.close() - - Ytest_hat_fl = model.predict(test_generator, verbose=1) - - # Create the multidimensional arrays for the results and ground-truth values - mat_pred = np.zeros((Ytest_fl[0].shape[0],p,Ytest_fl[0].shape[1])) - print(mat_pred.shape) - - - # Fill the output arrays - for i in range(p): - for j in range(Ytest_fl[0].shape[0]): - mat_pred[j,i,:]=Ytest_hat_fl[i][j,:] - - if decision2 == 'no-scaling': - pass - elif decision2 == 'auto': - mat_pred = mat_pred * std_val + med_val - elif decision2 == 'range': - mat_pred = mat_pred * range_val + min_val - elif decision2 == 'MaxPerMode': - mat_pred = mat_pred * max_val - - if output2 == 'yes': - Ytest_all = model.predict(all_generator, verbose=1) - mat_all = np.zeros((Ytest_all[0].shape[0],p,Ytest_all[0].shape[1])) - - for i in range(p): - for j in range(Ytest_all[0].shape[0]): - mat_all[j,i,:]=Ytest_all[i][j,:] - - if decision2 == 'no-scaling': - pass - elif decision2 == 'auto': - mat_all = mat_all * std_val + med_val - elif decision2 == 'range': - mat_all = mat_all * range_val + min_val - elif decision2 == 'MaxPerMode': - mat_all = mat_all * max_val - - # Data Extraction and Tensor Reconstruction - new_dim = [mat_pred.shape[0],mat_pred.shape[2]] - - AML_pre_LSTM = np.transpose(np.squeeze(mat_pred[:,0,:])) - num_snap = AML_pre_LSTM.shape[1] - - mat_time_slice_index = np.zeros((Ytest_fl[0].shape[0],p),dtype=int) - for i in range(p): - for j in range(Ytest_fl[0].shape[0]): - mat_time_slice_index[j,i]=test_idxs[j]+k+i - - time_lag = mat_time_slice_index[0,0] - - # Matrix reconstruction - - Mat_pre = np.dot(U,AML_pre_LSTM) - - # Tensor reconstruction - Ten_pre = np.reshape(Mat_pre, [nv,nx,ny,AML_pre_LSTM.shape[1]], order='F') - - for iter in range(data.shape[3]): - variable = Ten_pre[iter,:,:,:] - Ten_pre[iter,:,:,:] = variable*(Factor[iter])+Media[iter]+Media_tiempo[iter,:,:,:Ten_pre.shape[3]] - - if output2 == 'yes': - AML_all_LSTM = np.transpose(np.squeeze(mat_all[:,0,:])) - - - # RRMSE Measure with Original Tensor - if output4 == 'yes': - RRMSE_orNN = np.zeros(Tensor_orig.shape[0]) - for iter in range(Tensor_orig1.shape[0]): - - diff = Ten_pre[iter,:,:,:] - Tensor_orig[iter,:,:,time_lag:time_lag+num_snap] - RRMSE_orNN[iter] = LA.norm(np.reshape(diff.flatten(),(-1,1)),ord=2)/LA.norm(np.reshape(Tensor_orig[iter,:,:,time_lag:time_lag+num_snap].flatten(),(-1,1)),ord=2) - st.write(f'###### RRMSE measure with original tensor - variable {iter+1}: {np.round(RRMSE_orNN[iter]*100, 3)}%\n') - st.write('\n') - np.save(f"{path0}/hyb_RNN_solution/RRMSE_orNN.npy", RRMSE_orNN) - st.write('\n') - # RRMSE with Truncated - if output5 == 'yes': - RRMSE_trNN = np.zeros(Tensor_orig.shape[0]) - for iter in range(Tensor_orig1.shape[0]): - - diff = Ten_pre[iter,:,:,:] - Ten_trunc[iter,:,:,time_lag:time_lag+num_snap] - RRMSE_trNN[iter] = LA.norm(np.reshape(diff.flatten(),(-1,1)),ord=2)/LA.norm(np.reshape(Ten_trunc[iter,:,:,time_lag:time_lag+num_snap].flatten(),(-1,1)),ord=2) - - st.write(f'###### RRMSE measure with truncated tensor - variable {iter+1}: {np.round(RRMSE_trNN[iter]*100, 3)}%\n') - - st.write('\n') - np.save(f"{path0}/hyb_RNN_solution/RRMSE_trNN.npy", RRMSE_trNN) - st.write('\n') - # RRMSE Measure - if output6 == 'yes': - diff = AML_pre_LSTM - AML[:,time_lag:time_lag+num_snap] - globalRRMSE = 
LA.norm(np.reshape(diff.flatten(),(-1,1)),ord=2)/LA.norm(np.reshape(AML[:,time_lag:time_lag+num_snap].flatten(),(-1,1)),ord=2)
-            st.write(f'###### Global RRMSE measure: {np.round(globalRRMSE*100, 3)}%\n')
-            st.write('\n')
-
-
-            rrmse_arr = np.zeros(num_snap)
-            for i in range(num_snap):
-                diff = AML[:,time_lag+i]-AML_pre_LSTM[:,i]
-                rrmse_arr[i] = LA.norm(diff.flatten(),2)/LA.norm(np.transpose(AML[:,i]).flatten(),2)
-
-            fig, ax = plt.subplots()
-            ax.plot(rrmse_arr,'k*-')
-            ax.set_xlabel('Time')
-            ax.set_ylabel('RRMSE')
-            plt.savefig(f'{path0}/hyb_RNN_solution/rrmse.png')
-            st.pyplot(fig)
-            plt.close()
-
-        # AML Comparison
-        if output7 == 'yes':
-            # AML comparison plots
-            if not os.path.exists(f'{path0}/hyb_RNN_solution/Mode_Comparison'):
-                os.mkdir(f"{path0}/hyb_RNN_solution/Mode_Comparison")
-
-            AML_pre_1 = np.zeros([AML.shape[0],time_lag])
-            AML_pre_1 = np.concatenate((AML_pre_1,AML_pre_LSTM),axis=1)
-
-            for i in nModes:
-                fig, ax = plt.subplots()
-                ax.plot(AML[i-1,time_lag:],'k*-')
-                ax.plot(AML_pre_1[i-1,time_lag:],'m*-')
-                ax.set_xlabel("Time", fontsize = 12)
-                ax.set_title(f"Comparison between AML and AMLPRE at mode {i}", fontsize = 14)
-                plt.savefig(f'{path0}/hyb_RNN_solution/Mode_Comparison/aMLComparation_m_{i}_test.png')
-                st.pyplot(fig)
-                plt.close()
-                if output2 == 'yes':
-                    fig, ax = plt.subplots()
-                    ax.plot(AML[i-1,:nt-k-p+1],'k*-', label='Original')
-                    ax.plot(k+all_idxs[:train_idxs[-1]],AML_all_LSTM[i-1,:train_idxs[-1]],'b*-', label='Training')
-                    ax.plot(k+all_idxs[1+train_idxs[-1]:val_idxs[-1]],AML_all_LSTM[i-1,1+train_idxs[-1]:val_idxs[-1]],'r*-', label='Validation')
-                    ax.plot(k+all_idxs[1+val_idxs[-1]:all_idxs[-1]],AML_all_LSTM[i-1,1+val_idxs[-1]:all_idxs[-1]],'g*-', label='Test')
-                    ax.set_xlabel("Time", fontsize = 12)
-                    ax.legend()
-                    ax.set_title(f"Mode {i}", fontsize = 14)
-                    fig.tight_layout()
-                    namefig1 = f'{path0}/hyb_RNN_solution/Mode_Comparison/aMLComparation_m_{i}_all.png'
-                    plt.savefig(namefig1)
-                    st.pyplot(fig)
-                    plt.close()
-
-
-        # Tensor Comparison
-        if output8 == 'yes':
-            if not os.path.exists(f'{path0}/hyb_RNN_solution/Snapshots'):
-                os.mkdir(f"{path0}/hyb_RNN_solution/Snapshots")
-
-            for i in range(index.shape[0]):
-                namefig_orig1 = f'{path0}/hyb_RNN_solution/Snapshots/snap_comp{index[i,0]+1}_t{index[i,1]+1}.png'
-                fig, ax = plt.subplots()
-
-                ax.contourf(xv,yv,Tensor_orig[int(index[i,0]),:,:,time_lag+int(index[i,1])],100,cmap='jet',
-                            vmin = np.amin(Tensor_orig[int(index[i,0]),:,:,time_lag+int(index[i,1])]),
-                            vmax = np.amax(Tensor_orig[int(index[i,0]),:,:,time_lag+int(index[i,1])]))
-
-                ax.contourf(-xv,yv,Ten_pre[int(index[i,0]),:,:,int(index[i,1])],100,cmap='jet',
-                            vmin = np.amin(Tensor_orig[int(index[i,0]),:,:,time_lag+int(index[i,1])]),
-                            vmax = np.amax(Tensor_orig[int(index[i,0]),:,:,time_lag+int(index[i,1])]))
-
-                ax.set_title(f'Prediction vs. Original Data - Comp.
{int(index[i,0])+1} Snapshot {int(index[i,1])+1}', fontsize = 14) - - ax = plt.gca() - ax.set_aspect(1) - props = dict(boxstyle='round', facecolor='white', alpha=1) - ax.annotate('', xy=(0.5, -0.005), xycoords='axes fraction', xytext=(0.5, 1.005), - arrowprops=dict(arrowstyle='-', lw = 3, color='k')) - ax.axis('off') - plt.savefig(namefig_orig1) - st.pyplot(fig) - plt.close() - - st.info("Press 'Refresh' to run a new case") - Refresh = st.button('Refresh') - if Refresh: - st.stop() diff --git a/v0.1_ModelFLOWs_web/modelflows.jpeg b/v0.1_ModelFLOWs_web/modelflows.jpeg deleted file mode 100644 index fe42721..0000000 Binary files a/v0.1_ModelFLOWs_web/modelflows.jpeg and /dev/null differ diff --git a/v0.1_ModelFLOWs_web/thumbnail.png b/v0.1_ModelFLOWs_web/thumbnail.png deleted file mode 100644 index 6f41cbe..0000000 Binary files a/v0.1_ModelFLOWs_web/thumbnail.png and /dev/null differ diff --git a/v0.1_ModelFLOWs_app/AEmodel.py b/v0.2_ModelFLOWs_app/AEmodel.py similarity index 100% rename from v0.1_ModelFLOWs_app/AEmodel.py rename to v0.2_ModelFLOWs_app/AEmodel.py diff --git a/v0.1_ModelFLOWs_app/DLsuperresolution.py b/v0.2_ModelFLOWs_app/DLsuperresolution.py similarity index 94% rename from v0.1_ModelFLOWs_app/DLsuperresolution.py rename to v0.2_ModelFLOWs_app/DLsuperresolution.py index 86064a9..55800f4 100644 --- a/v0.1_ModelFLOWs_app/DLsuperresolution.py +++ b/v0.2_ModelFLOWs_app/DLsuperresolution.py @@ -649,52 +649,46 @@ def custom_loss(y_actual,y_pred): if 'Xr_sel_sc' in locals(): for i in range(np.size(Xr_sel_sc,0)): U_var_sel_sc = [] - S_var_sel_sc = [] V_var_sel_sc = [] for j in range(nvar): U, S, V = np.linalg.svd(Xr_sel_sc[i,...,j], full_matrices=False) + U = U @ np.sqrt(np.diag(S)) + V = V @ np.sqrt(np.diag(S)) U_var_sel_sc.append(U) - S_var_sel_sc.append(np.diag(S)) V_var_sel_sc.append(V) XUds_= np.stack(U_var_sel_sc, axis=-1) - XSds_= np.stack(S_var_sel_sc, axis=-1) XVds_= np.stack(V_var_sel_sc, axis=-1) XUds[i,...]=XUds_ - XSds[i,...]=XSds_ XVds[i,...]=XVds_ print('\nSVD Summary:') print('XUds: ',XUds.shape) - print('XSds: ',XSds.shape) print('XVds: ',XVds.shape) print('\n') else: for i in range(np.size(Xr_sel,0)): U_var_sel = [] - S_var_sel = [] V_var_sel = [] for j in range(nvar): U, S, V = np.linalg.svd(Xr_sel[i,...,j], full_matrices=False) + U = U @ np.sqrt(np.diag(S)) + V = V @ np.sqrt(np.diag(S)) U_var_sel.append(U) - S_var_sel.append(np.diag(S)) V_var_sel.append(V) - XUds_= np.stack(U_var_sel, axis=-1) - XSds_= np.stack(S_var_sel, axis=-1) + XUds_= np.stack(U_var_sel, axis=-1) XVds_= np.stack(V_var_sel, axis=-1) XUds[i,...]=XUds_ - XSds[i,...]=XSds_ XVds[i,...]=XVds_ print('\nSVD Summary:') print('XUds: ',XUds.shape) - print('XSds: ',XSds.shape) print('XVds: ',XVds.shape) print('\n') @@ -732,8 +726,6 @@ def custom_loss(y_actual,y_pred): XUds_train = XUds[train_ind] XUds_test = XUds[test_ind] - XSds_train = XSds[train_ind] - XSds_test = XSds[test_ind] XVds_train = XVds[train_ind] XVds_test = XVds[test_ind] print('\nTrain-test split summary: \n') @@ -741,8 +733,6 @@ def custom_loss(y_actual,y_pred): print('Xr_test: ',Xr_test.shape) print('XUds_train: ',XUds_train.shape) print('XUds_test: ',XUds_test.shape) - print('XSds_train: ',XSds_train.shape) - print('XSds_test: ',XSds_test.shape) print('XVds_train: ',XVds_train.shape) print('XVds_test: ',XVds_test.shape) print('\n') @@ -760,17 +750,15 @@ def custom_loss(y_actual,y_pred): # Model inputs in_U_dim = XUds.shape[1:] - in_S_dim = XSds.shape[1:] in_V_dim = XVds.shape[1:] out_dim = Xr_train.shape[1:] - + # Neural Network 
construction if hyper == 'no': - def create_model_1(in_U_dim, in_S_dim, in_V_dim, out_dim): + def create_model_1(in_U_dim, in_V_dim, out_dim): in_U = Input(shape=(*in_U_dim,),name='in_u') - in_S = Input(shape=(*in_S_dim,),name='in_s') in_V = Input(shape=(*in_V_dim,),name='in_v') u = Flatten(name='u_1')(in_U) @@ -784,9 +772,9 @@ def create_model_1(in_U_dim, in_S_dim, in_V_dim, out_dim): XUus_reshape = Reshape((out_dim[0],in_U_dim[1],in_U_dim[2]),name='reshape_u')(XUus) XVus_reshape = Reshape((in_V_dim[0],out_dim[1],in_V_dim[2]),name='reshape_v')(XVus) - X_hat = tf.einsum('ijkl,iknl,inpl->ijpl', XUus_reshape, in_S, XVus_reshape) + X_hat = tf.einsum('ijkl,ikpl->ijpl', XUus_reshape, XVus_reshape) - m = Model(inputs=[in_U,in_S,in_V],outputs= X_hat) + m = Model(inputs=[in_U,in_V],outputs= X_hat) # m_upsampled_matrices = Model(inputs=[in_U,in_S,in_V],outputs= [XUus_reshape,XVus_reshape]) m.compile(loss=loss_function, optimizer=Adam(learn_rate), metrics=['mse']) @@ -795,7 +783,7 @@ def create_model_1(in_U_dim, in_S_dim, in_V_dim, out_dim): return m # model, model_upsampled_matrices = create_model_1(in_U_dim, in_S_dim, in_V_dim, out_dim) - model = create_model_1(in_U_dim, in_S_dim, in_V_dim, out_dim) + model = create_model_1(in_U_dim, in_V_dim, out_dim) print('Model Summary:\n') model.summary() @@ -812,7 +800,6 @@ def create_model_hp(hp): hp_learning_rate = hp.Choice('learning_rate', values=[1e-2, 1e-3, 5e-3, 1e-4]) in_U = Input(shape=(*in_U_dim,),name='in_u') - in_S = Input(shape=(*in_S_dim,),name='in_s') in_V = Input(shape=(*in_V_dim,),name='in_v') u = Flatten(name='u_1')(in_U) @@ -826,9 +813,9 @@ def create_model_hp(hp): XUus_reshape = Reshape((out_dim[0],in_U_dim[1],in_U_dim[2]),name='reshape_u')(XUus) XVus_reshape = Reshape((in_V_dim[0],out_dim[1],in_V_dim[2]),name='reshape_v')(XVus) - X_hat = tf.einsum('ijkl,iknl,inpl->ijpl', XUus_reshape, in_S, XVus_reshape) + X_hat = tf.einsum('ijkl,ikpl->ijpl', XUus_reshape, XVus_reshape) - m = Model(inputs=[in_U,in_S,in_V],outputs= X_hat) + m = Model(inputs=[in_U,in_V],outputs= X_hat) m.compile(loss='mse', optimizer=Adam(hp_learning_rate), metrics=['mse']) @@ -847,7 +834,7 @@ def create_model_hp(hp): print('\nSearching for optimal hyperparameters...\n') - tuner.search([XUds_train,XSds_train,XVds_train], + tuner.search([XUds_train,XVds_train], Xr_train, batch_size=bs, epochs=50, @@ -881,7 +868,7 @@ def create_model_hp(hp): callbacks = [ModelCheckpoint(save_best_weights, monitor='val_loss', save_best_only=True, mode='auto'), EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')] - history = model.fit([XUds_train,XSds_train,XVds_train],Xr_train, + history = model.fit([XUds_train,XVds_train],Xr_train, batch_size=bs, epochs=epoch, validation_split=0.15, @@ -939,7 +926,7 @@ def create_model_hp(hp): print('\nModel predicting. Please wait\n') t0 = time.time() - Xr_hat = model.predict([XUds_test, XSds_test, XVds_test]) + Xr_hat = model.predict([XUds_test, XVds_test]) t1 = time.time() print(f"\nPrediction complete. 
Time elapsed: {np.round(((t1 - t0) / 60.), 2)} minutes\n") diff --git a/v0.1_ModelFLOWs_app/DMDd.py b/v0.2_ModelFLOWs_app/DMDd.py similarity index 96% rename from v0.1_ModelFLOWs_app/DMDd.py rename to v0.2_ModelFLOWs_app/DMDd.py index 6ad1354..c3db010 100644 --- a/v0.1_ModelFLOWs_app/DMDd.py +++ b/v0.2_ModelFLOWs_app/DMDd.py @@ -403,20 +403,20 @@ def hodmd_IT(Vvir,d,t,esvd,edmd): #Frequencies and growthrate: delta=np.log(eigval).real/dt #r' omega=np.log(eigval).imag/dt #r' - + #Modes q=(Udot@eigvec)[(d-1)*N:d*N,:] #Taking steps back n*N Uvir=q/np.linalg.norm(q,axis=0) #n*N - #print("Size Uvir: ",Uvir.shape) + #Amplitudes: A=np.zeros((Uvir.shape[0]*Vvir.shape[1],Uvir.shape[1]),dtype=np.complex128) #(n*J)*N - #print("Size A: ",A.shape) + b=np.zeros(Uvir.shape[0]*Vvir.shape[1],dtype=Vvir.dtype)#(n*J)*1 for i in range(Vvir.shape[1]): A[i*Uvir.shape[0]:(i+1)*Uvir.shape[0],:]=Uvir@np.diag(eigval**i) b[i*Uvir.shape[0]:(i+1)*Uvir.shape[0]]=Vvir[:,i] -# print(A[:Uvir.shape[0],:]) + Ua,sa,Wa=np.linalg.svd(A,full_matrices=False) #(n*J)*N, N*N, N*N a=Wa.conj().T@np.diag(sa**-1)@Ua.conj().T@b #N diff --git a/v0.1_ModelFLOWs_app/FullDLmodel.py b/v0.2_ModelFLOWs_app/FullDLmodel.py similarity index 100% rename from v0.1_ModelFLOWs_app/FullDLmodel.py rename to v0.2_ModelFLOWs_app/FullDLmodel.py diff --git a/v0.1_ModelFLOWs_app/GappyRepair.py b/v0.2_ModelFLOWs_app/GappyRepair.py similarity index 99% rename from v0.1_ModelFLOWs_app/GappyRepair.py rename to v0.2_ModelFLOWs_app/GappyRepair.py index 7c77fb9..2718733 100644 --- a/v0.1_ModelFLOWs_app/GappyRepair.py +++ b/v0.2_ModelFLOWs_app/GappyRepair.py @@ -250,7 +250,7 @@ def GappyRepair(): MSE_gaps[ii] = LA.norm(A_reconst[np.isnan(A_gappy)]-A_s[np.isnan(A_gappy)])/N - if ii>3 and MSE_gaps[ii]>=MSE_gaps[ii-1]: + if (ii>3 and MSE_gaps[ii]>=MSE_gaps[ii-1]) or MSE_gaps[ii]<1e-9: break else: A_s[np.isnan(A_gappy)] = A_reconst[np.isnan(A_gappy)] diff --git a/v0.1_ModelFLOWs_app/HODMD.py b/v0.2_ModelFLOWs_app/HODMD.py similarity index 100% rename from v0.1_ModelFLOWs_app/HODMD.py rename to v0.2_ModelFLOWs_app/HODMD.py diff --git a/v0.1_ModelFLOWs_app/HODMD_pred.py b/v0.2_ModelFLOWs_app/HODMD_pred.py similarity index 100% rename from v0.1_ModelFLOWs_app/HODMD_pred.py rename to v0.2_ModelFLOWs_app/HODMD_pred.py diff --git a/v0.1_ModelFLOWs_app/HybCNNpredmodel.py b/v0.2_ModelFLOWs_app/HybCNNpredmodel.py similarity index 100% rename from v0.1_ModelFLOWs_app/HybCNNpredmodel.py rename to v0.2_ModelFLOWs_app/HybCNNpredmodel.py diff --git a/v0.1_ModelFLOWs_app/HybRNNpredmodel.py b/v0.2_ModelFLOWs_app/HybRNNpredmodel.py similarity index 100% rename from v0.1_ModelFLOWs_app/HybRNNpredmodel.py rename to v0.2_ModelFLOWs_app/HybRNNpredmodel.py diff --git a/v0.1_ModelFLOWs_app/ModelFLOWs_app.py b/v0.2_ModelFLOWs_app/ModelFLOWs_app.py similarity index 94% rename from v0.1_ModelFLOWs_app/ModelFLOWs_app.py rename to v0.2_ModelFLOWs_app/ModelFLOWs_app.py index 6bfbf63..4a6fb68 100644 --- a/v0.1_ModelFLOWs_app/ModelFLOWs_app.py +++ b/v0.2_ModelFLOWs_app/ModelFLOWs_app.py @@ -8,6 +8,7 @@ import GappyRepair import mdHODMD import mdHODMD_pred +import mdHODMD_control import HODMD import HODMD_pred import mainHOSVD @@ -93,12 +94,13 @@ 1) Pattern detection 2) Data repairing 3) Prediction +4) Flow Control 0) Exit ''') while True: - operation = input('Select an OPERATION (1/2/3): ') + operation = input('Select an OPERATION (1/2/3/4): ') if operation.isdigit(): if int(operation) == 1: break @@ -106,6 +108,9 @@ break elif int(operation) == 3: break + elif int(operation) == 4: + 
mdHODMD_control.HODMDsens()
+                break
         elif int(operation) == 0:
             break
         else:
diff --git a/v0.1_ModelFLOWs_app/Requirements.txt b/v0.2_ModelFLOWs_app/Requirements.txt
similarity index 100%
rename from v0.1_ModelFLOWs_app/Requirements.txt
rename to v0.2_ModelFLOWs_app/Requirements.txt
diff --git a/v0.1_ModelFLOWs_app/SVD.py b/v0.2_ModelFLOWs_app/SVD.py
similarity index 100%
rename from v0.1_ModelFLOWs_app/SVD.py
rename to v0.2_ModelFLOWs_app/SVD.py
diff --git a/v0.1_ModelFLOWs_app/Superresolution.py b/v0.2_ModelFLOWs_app/Superresolution.py
similarity index 100%
rename from v0.1_ModelFLOWs_app/Superresolution.py
rename to v0.2_ModelFLOWs_app/Superresolution.py
diff --git a/v0.1_ModelFLOWs_app/data_load.py b/v0.2_ModelFLOWs_app/data_load.py
similarity index 96%
rename from v0.1_ModelFLOWs_app/data_load.py
rename to v0.2_ModelFLOWs_app/data_load.py
index e7fd297..db77e9b 100644
--- a/v0.1_ModelFLOWs_app/data_load.py
+++ b/v0.2_ModelFLOWs_app/data_load.py
@@ -104,6 +104,7 @@ def main(filetype):
     while True:
         if filetype.strip().lower() in ['mat', '.mat']:
             wantedfile = input('Introduce name of input data file (as in: Desktop/Folder/data.mat): ')
+            print(wantedfile)
             if os.path.exists(f'{wantedfile}'):
                 Tensor = load_data('mat', wantedfile)
                 break
diff --git a/v0.1_ModelFLOWs_app/hosvd.py b/v0.2_ModelFLOWs_app/hosvd.py
similarity index 75%
rename from v0.1_ModelFLOWs_app/hosvd.py
rename to v0.2_ModelFLOWs_app/hosvd.py
index c53a1ea..2476cbc 100644
--- a/v0.1_ModelFLOWs_app/hosvd.py
+++ b/v0.2_ModelFLOWs_app/hosvd.py
@@ -39,18 +39,20 @@ def svdtrunc(A, n):
     U, S, _ = randomized_svd(A, n_components = n)
     return U, S
 
-def HOSVD_function(T,n):
+def HOSVD_function(T,varepsilon1):
     '''Perform hosvd to tensor'''
     P = T.ndim
     U = np.zeros(shape=(1,P), dtype=object)
     UT = np.zeros(shape=(1,P), dtype=object)
     sv = np.zeros(shape=(1,P), dtype=object)
-    producto = n[0]
+    producto = np.size(T)
 
-    for i in range(1,P):
-        producto = producto * n[i]
+    if isinstance(varepsilon1, (list, np.ndarray)):
+        n = varepsilon1
+    else:
+        n = T.shape
 
-    n = list(n) # to be able to assign values
+    n = list(n)
 
     for i in range(0,P):
         n[i] = int(np.amin((n[i],producto/n[i])))
@@ -63,6 +65,16 @@
             Uaux[:,0] = Ui[:,0]
             U[0][i] = Uaux
         else:
+            if isinstance(varepsilon1, (list, np.ndarray)):
+                pass
+            else:
+                count = 0
+                for j in range(0,np.shape(svi)[0]):
+                    if svi[j]/svi[0]<=varepsilon1:
+                        pass
+                    else:
+                        count = count+1
+                n[i] = count
             U[0][i] = Ui[:,0:n[i]] # U[i] = Ui
         UT[0][i] = np.transpose(U[0][i])
@@ -76,23 +88,15 @@ def HOSVD(Tensor,varepsilon1,nn,n,TimePos):
     if np.iscomplex(Tensor.any()) == False:
         Tensor = Tensor.astype(np.float32)
 
-    [TT,S,U,sv,n] = HOSVD_function(Tensor,n) # the problem is inside this function
+    [TT,S2,U,sv,nn2] = HOSVD_function(Tensor,varepsilon1) # the problem is inside this function
     ## Set the truncation of the singular values using varepsilon1
     # (automatic truncation)
-    for i in range(1,np.size(nn)):
-        count = 0
-        for j in range(0,np.shape(sv[0][i])[0]):
-            if sv[0][i][j]/sv[0][i][0]<=varepsilon1:
-                pass
-            else:
-                count = count+1
-
-        nn[i] = count
+
     print(f'Initial number of singular values: {n}')
-    print(f'Number of singular values retained: {nn}')
+    print(f'Number of singular values retained: {nn2}')
     ## Perform HOSVD retaining n singular values: reconstruction of the modes
-    [TT,S2,U,sv2,nn2] = HOSVD_function(Tensor,nn)
+    #[TT,S2,U,sv2,nn2] = HOSVD_function(Tensor,nn)
     ## Construct the reduced matrix containing the temporal modes:
     UT = np.zeros(shape=np.shape(U), dtype=object)
@@ -100,6 +104,6 @@ def
HOSVD(Tensor,varepsilon1,nn,n,TimePos): for pp in range(0,np.size(nn2)): UT[0][pp] = np.transpose(U[0][pp]) for kk in range(0,nn2[TimePos-1]): - hatT.append(np.dot(sv2[0][TimePos-1][kk],UT[0][TimePos-1][kk,:])) + hatT.append(np.dot(sv[0][TimePos-1][kk],UT[0][TimePos-1][kk,:])) hatT = np.reshape(hatT, newshape=(len(hatT),np.size(hatT[0]))) return hatT,U,S2,sv,nn2,n,TT \ No newline at end of file diff --git a/v0.1_ModelFLOWs_app/mainHOSVD.py b/v0.2_ModelFLOWs_app/mainHOSVD.py similarity index 100% rename from v0.1_ModelFLOWs_app/mainHOSVD.py rename to v0.2_ModelFLOWs_app/mainHOSVD.py diff --git a/v0.1_ModelFLOWs_app/mdHODMD.py b/v0.2_ModelFLOWs_app/mdHODMD.py similarity index 80% rename from v0.1_ModelFLOWs_app/mdHODMD.py rename to v0.2_ModelFLOWs_app/mdHODMD.py index df7e631..7391e8e 100644 --- a/v0.1_ModelFLOWs_app/mdHODMD.py +++ b/v0.2_ModelFLOWs_app/mdHODMD.py @@ -294,70 +294,77 @@ def animate(i): GrowthRates = [] d_val = [] tol_val = [] - DMDmode_list = [] - Tensor_rec_list = [] - for d in d_list: - for varepsilon1 in tol1_list: - if len(tol1_list) > 1: - varepsilon2 = varepsilon1 - elif not vardec or vardec.strip().lower() in ['y', 'yes']: - varepsilon2 = varepsilon1 + + for varepsilon1 in tol1_list: + if len(tol1_list) > 1: + varepsilon2 = varepsilon1 + elif not vardec or vardec.strip().lower() in ['y', 'yes']: + varepsilon2 = varepsilon1 + + ## Perform HOSVD decomposition to calculate the reduced temporal matrix: hatT + nnin = nn + nnin = tuple(nnin) + print(f'\nPerforming HOSVD for tol = {varepsilon1}. Please wait...\n') + [hatT,U,S,sv,nn1,n,TT] = hosvd.HOSVD(TensorR,varepsilon1,nn,nn0,TimePos) + print('\nHOSVD complete!\n') + + # hatT: Reduced temporal matrix + for d in d_list: if not os.path.exists(f'{path0}/{filen}/{d}_tol_{varepsilon1}'): os.mkdir(f"{path0}/{filen}/d_{d}_tol_{varepsilon1}") if not os.path.exists(f'{path0}/{filen}/{d}_tol_{varepsilon1}/DMDmodes'): os.mkdir(f"{path0}/{filen}/d_{d}_tol_{varepsilon1}/DMDmodes") print(f'\nRunning HODMD for d = {d} and tol = {varepsilon1}') - for zz in range(0,n_iter): - if n_iter > 1: - print(f'Iteration number: {zz+1}') - if zz != 0: - del S,U,Frequency,Amplitude,GrowthRate,hatT,hatMode,sv,TensorR,nnin - TensorR = TensorReconst - - ## Perform HOSVD decomposition to calculate the reduced temporal matrix: hatT - nnin = nn - nnin = tuple(nnin) - print('\nPerforming HOSVD. Please wait...\n') - [hatT,U,S,sv,nn1,n,TT] = hosvd.HOSVD(TensorR,varepsilon1,nn,nn0,TimePos) - print('\nHOSVD complete!\n') - # hatT: Reduced temporal matrix - - ## Perform HODMD to the reduced temporal matrix hatT: - print('Performing HODMD. Please wait...\n') - [hatMode,Amplitude,Eigval,GrowthRate,Frequency] = DMDd.hodmd_IT(hatT,d,Time,varepsilon1,varepsilon2) - print('\nHODMD complete!\n') - ## Reconstruct the original Tensor using the DMD expansion: - TensorReconst = DMDd.reconst_IT(hatMode,Time,U,S,sv,nn1,TimePos,GrowthRate,Frequency) - RRMSE = np.linalg.norm(np.reshape(Tensor-TensorReconst,newshape=(np.size(Tensor),1)),ord=2)/np.linalg.norm(np.reshape(Tensor,newshape=(np.size(Tensor),1))) - print(f'Relative mean square error made in the calculations: {np.round(RRMSE*100, 3)}%\n') - - GRFreqAmp = np.zeros((np.size(GrowthRate),3)) - for ind in range (0,np.size(GrowthRate)): - GRFreqAmp[ind,0] = GrowthRate[ind] - GRFreqAmp[ind,1] = Frequency[ind] - GRFreqAmp[ind,2] = Amplitude[ind] + ## Perform HODMD to the reduced temporal matrix hatT: + print('Performing HODMD. 
Please wait...\n') + [hatMode,Amplitude,Eigval,GrowthRate,Frequency] = DMDd.hodmd_IT(hatT,d,Time,varepsilon1,varepsilon2) + print('\nHODMD complete!\n') + + ## Reconstruct the original Tensor using the DMD expansion: + TensorReconst = DMDd.reconst_IT(hatMode,Time,U,S,sv,nn1,TimePos,GrowthRate,Frequency) + + nn10 = nn1 + if n_iter>1: + for zz in range(0,n_iter): + print(f'Iteration number: {zz+1}') + if zz != 0: + # del SIT,UIT,Frequency,Amplitude,GrowthRate,hatTIT,hatMode,svIT,TensorIT,nnin + + TensorIT = TensorReconst + print(f'\nPerforming HOSVD for tol = {varepsilon1}. Please wait...\n') + [hatTIT,UIT,SIT,svIT,nn1IT,nIT,TTIT] = hosvd.HOSVD(TensorIT,varepsilon1,nn,nn0,TimePos) + + ## Perform HODMD to the reduced temporal matrix hatT: + print('Performing HODMD. Please wait...\n') + [hatMode,Amplitude,Eigval,GrowthRate,Frequency] = DMDd.hodmd_IT(hatTIT,d,Time,varepsilon1,varepsilon2) + print('\nHODMD complete!\n') + + ## Reconstruct the original Tensor using the DMD expansion: + TensorReconst = DMDd.reconst_IT(hatMode,Time,UIT,SIT,svIT,nn1IT,TimePos,GrowthRate,Frequency) - print('GrowthRate, Frequency and Amplitude:') - print(GRFreqAmp) + ## Break the loop when the number of singular values is the same in two consecutive iterations: + if nn1IT==nn10: + break + nn10 = nn1IT + print('\n') + + if n_iter > 1: + print(f'Final number of iterations for d = {d} and tol = {varepsilon1}: {zz+1}') - ## Break the loop when the number of singular values is the same in two consecutive iterations: + RRMSE = np.linalg.norm(np.reshape(Tensor-TensorReconst,newshape=(np.size(Tensor),1)),ord=2)/np.linalg.norm(np.reshape(Tensor,newshape=(np.size(Tensor),1))) + print(f'Relative mean square error made in the calculations: {np.round(RRMSE*100, 3)}%\n') - num = 0 - for ii in range(1,np.size(nn1)): - if nnin[ii]==nn1[ii]: - num = num+1 - - if num==np.size(nn1)-1: - break - nn = nn1 - print('\n') + GRFreqAmp = np.zeros((np.size(GrowthRate),3)) + for ind in range (0,np.size(GrowthRate)): + GRFreqAmp[ind,0] = GrowthRate[ind] + GRFreqAmp[ind,1] = Frequency[ind] + GRFreqAmp[ind,2] = Amplitude[ind] - if n_iter > 1: - print(f'Final number of iterations for d = {d} and tol = {varepsilon1}: {zz+1}') - print(f'\nRelative mean square error made in the calculations: {np.round(RRMSE*100, 3)}%\n') + print('GrowthRate, Frequency and Amplitude:') + print(GRFreqAmp) Frequencies.append(Frequency) Amplitudes.append(Amplitude) @@ -374,16 +381,13 @@ def animate(i): file_mat = str(f'{path0}/{filen}/d_{d}_tol_{varepsilon1}/GRFreqAmp.mat') hdf5storage.savemat(file_mat, mdic, appendmat=True, format='7.3') - # Tensor reconstruction - TensorReconst = DMDd.reconst_IT(hatMode,Time,U,S,sv,nn1,TimePos,GrowthRate,Frequency) - Tensor_rec_list.append(TensorReconst) ## Save the reconstruction of the tensor and the Growth rates, frequencies and amplitudes: # Reconstruction: if not decision_2 or decision_2.strip().lower() in ['npy', '.npy']: np.save(f'{path0}/{filen}/d_{d}_tol_{varepsilon1}/TensorReconst.npy', TensorReconst) - + if decision_2.strip().lower() in ['.mat', 'mat']: mdic = {"TensorReconst": TensorReconst} file_mat = str(f'{path0}/{filen}/d_{d}_tol_{varepsilon1}/TensorReconst.mat') @@ -391,14 +395,17 @@ def animate(i): ## Calculate DMD modes: print('Calculating DMD modes...') - N = np.shape(hatT)[0] - DMDmode = DMDd.modes_IT(N,hatMode,Amplitude,U,S,nn1,TimePos) - DMDmode_list.append(DMDmode) + if n_iter>1: + N = np.shape(hatTIT)[0] + DMDmode = DMDd.modes_IT(N,hatMode,Amplitude,UIT,SIT,nn10,TimePos) + else: + N = np.shape(hatT)[0] + DMDmode 
= DMDd.modes_IT(N,hatMode,Amplitude,U,S,nn1,TimePos) # Save DMD modes: if not decision_2 or decision_2.strip().lower() in ['npy', '.npy']: np.save(f'{path0}/{filen}/d_{d}_tol_{varepsilon1}/DMDmode.npy',DMDmode) - + if decision_2.strip().lower() in ['.mat', 'mat']: mdic = {"DMDmode": DMDmode} file_mat = str(f'{path0}/{filen}/d_{d}_tol_{varepsilon1}/DMDmode.mat') @@ -414,14 +421,14 @@ def animate(i): ax[0].set_title('Real part') ax[0].set_xlabel('X') ax[0].set_ylabel('Y') - + ax[1].contourf(DMDmode[:,:,ModeNum].imag) ax[1].set_title('Imaginary part') ax[1].set_xlabel('X') ax[1].set_ylabel('Y') plt.savefig(f'{path0}/{filen}/d_{d}_tol_{varepsilon1}/DMDmodes/ModeNum_{ModeNum+1}.png') plt.close(fig) - + for ModComp in range(DMDmode.shape[0]): for ModeNum in range(3): if TimePos==4: @@ -431,14 +438,14 @@ def animate(i): ax[0].set_title('Real part') ax[0].set_xlabel('X') ax[0].set_ylabel('Y') - + ax[1].contourf(DMDmode[ModComp,:,:,ModeNum].imag) ax[1].set_title('Imaginary part') ax[1].set_xlabel('X') ax[1].set_ylabel('Y') plt.savefig(f'{path0}/{filen}/d_{d}_tol_{varepsilon1}/DMDmodes/DMDmodeComp_{ModComp+1}_ModeNum_{ModeNum+1}.png') plt.close(fig) - + elif TimePos==5: nz = int(Tensor.shape[3] / 2) fig, ax = plt.subplots(1, 2, num=f'CLOSE TO CONTINUE RUN - DMD mode XY plane') @@ -447,7 +454,7 @@ def animate(i): ax[0].set_title('Real part - XY Plane') ax[0].set_xlabel('X') ax[0].set_ylabel('Y') - + ax[1].contourf(DMDmode[ModComp,:,:,nz,ModeNum].imag) ax[1].set_title('Imaginary part - XY Plane') ax[1].set_xlabel('X') @@ -459,7 +466,6 @@ def animate(i): # Frequency vs amplitudes comparison: colored_markers = ['bo', 'kx', 'ro', 'cx', 'mo', 'yx', 'k^', 'bs', 'gs', 'rs', 'cs', 'ms', 'ys', 'ks', 'b^', 'g^', 'r^', 'c^', 'm^', 'y^'] - total_cases = len(d_list) * len(tol1_list) if total_cases > 1: @@ -554,10 +560,18 @@ def animate(i): case = input(f'Select the case to plot (default case 1): ') if not case: case = 0 + if not decision_2 or decision_2.strip().lower() in ['npy', '.npy']: + TensorReconst = data_load.load_data('npy',f'{path0}/{filen}/d_{d_val[case]}_tol_{tol_val[case]}/TensorReconst.npy') + elif decision_2.strip().lower() in ['.mat', 'mat']: + TensorReconst = data_load.load_data('mat',f'{path0}/{filen}/d_{d_val[case]}_tol_{tol_val[case]}/TensorReconst.mat') break elif case.isdigit(): if int(case) <= total_cases: case = int(case)-1 + if not decision_2 or decision_2.strip().lower() in ['npy', '.npy']: + TensorReconst = data_load.load_data('npy',f'{path0}/{filen}/d_{d_val[case]}_tol_{tol_val[case]}/TensorReconst.npy') + elif decision_2.strip().lower() in ['.mat', 'mat']: + TensorReconst = data_load.load_data('mat',f'{path0}/{filen}/d_{d_val[case]}_tol_{tol_val[case]}/TensorReconst.mat') break else: print('\tError: Selected case does not exist\n') @@ -605,7 +619,7 @@ def animate(i): fig.suptitle(f'Real Data vs Reconstruction - Component comparison') ax[0].contourf(Tensor0[:, :, 0]) ax[0].scatter(x, y, c='black', s=50) - ax[1].plot(Time[:], Tensor_rec_list[case][y, x, :].real, 'k-x', label = 'Reconstructed Data') + ax[1].plot(Time[:], TensorReconst[y, x, :].real, 'k-x', label = 'Reconstructed Data') ax[1].plot(Time[:], Tensor[y, x, :], 'r-+', label = 'Real Data') ax[1].set_xlabel('Time') ax[1].set_ylabel('Data') @@ -620,7 +634,7 @@ def animate(i): fig.suptitle(f'Real Data vs Reconstruction - Case {case + 1} - Component comparison') ax[0].contourf(Tensor0[c, :, :, 0]) ax[0].scatter(x, y, c='black', s=50) - ax[1].plot(Time[:], Tensor_rec_list[case][c, y, x, :].real, 'k-x', label = 'Reconstructed Data') + 
ax[1].plot(Time[:], TensorReconst[c, y, x, :].real, 'k-x', label = 'Reconstructed Data') ax[1].plot(Time[:], Tensor[c, y, x, :], 'r-+', label = 'Real Data') ax[1].set_xlabel('Time') ax[1].set_ylabel('Data') @@ -637,7 +651,7 @@ def animate(i): fig.suptitle(f'Real Data vs Reconstruction - Case {case + 1} - Component comparison XY plane') ax[0].contourf(Tensor0[c, :, :, nz, 0]) ax[0].scatter(x, y, c='black', s=50) - ax[1].plot(Time[:], Tensor_rec_list[case][c, y, x, nz, :].real, 'k-x', label = 'Reconstructed Data') + ax[1].plot(Time[:], TensorReconst[c, y, x, nz, :].real, 'k-x', label = 'Reconstructed Data') ax[1].plot(Time[:], Tensor[c, y, x, nz, :], 'r-+', label = 'Real Data') ax[1].set_xlabel('Time') ax[1].set_ylabel('Data') @@ -667,22 +681,30 @@ def animate(i): case = input(f'Select the case to plot (default case 1): ') if not case: case = 0 + if not decision_2 or decision_2.strip().lower() in ['npy', '.npy']: + DMDmode = data_load.load_data('npy',f'{path0}/{filen}/d_{d_val[case]}_tol_{tol_val[case]}/DMDmode.npy') + elif decision_2.strip().lower() in ['.mat', 'mat']: + DMDmode = data_load.load_data('mat',f'{path0}/{filen}/d_{d_val[case]}_tol_{tol_val[case]}/DMDmode.mat') break elif case.isdigit(): if int(case) <= total_cases: case = int(case)-1 + if not decision_2 or decision_2.strip().lower() in ['npy', '.npy']: + DMDmode = data_load.load_data('npy',f'{path0}/{filen}/d_{d_val[case]}_tol_{tol_val[case]}/DMDmode.npy') + elif decision_2.strip().lower() in ['.mat', 'mat']: + DMDmode = data_load.load_data('mat',f'{path0}/{filen}/d_{d_val[case]}_tol_{tol_val[case]}/DMDmode.mat') break else: print('\tError: Selected case does not exist\n') else: print('\tError: Select a valid number format (must be integer)\n') while True: - ModeNum = input(f'Introduce the mode number to plot (default mode 1). Maximum number of modes is {DMDmode_list[case].shape[-1]}: ') + ModeNum = input(f'Introduce the mode number to plot (default mode 1). Maximum number of modes is {DMDmode.shape[-1]}: ') if not ModeNum: ModeNum = 0 break elif ModeNum.isdigit(): - if int(ModeNum) <=DMDmode_list[case].shape[-1]: + if int(ModeNum) <=DMDmode.shape[-1]: ModeNum = int(ModeNum)-1 break else: @@ -691,12 +713,12 @@ def animate(i): print('\tError: Select a valid number format (must be integer)\n') if TimePos > 3: while True: - ModComp = input(f'Introduce the component to plot (default component 1). Maximum number of components is {DMDmode_list[case].shape[0]}: ') + ModComp = input(f'Introduce the component to plot (default component 1). 
Maximum number of components is {DMDmode.shape[0]}: ') if not ModComp: ModComp = 0 break elif ModComp.isdigit(): - if int(ModComp) <= DMDmode_list[case].shape[0]: + if int(ModComp) <= DMDmode.shape[0]: ModComp = int(ModComp)-1 break else: @@ -707,12 +729,12 @@ def animate(i): if TimePos==3: fig, ax = plt.subplots(1, 2, num=f'CLOSE TO CONTINUE RUN - DMD mode') fig.suptitle(f'Case {case + 1} - DMDmode - Mode Number {ModeNum+1}') - ax[0].contourf(DMDmode_list[case][:,:,ModeNum].real) + ax[0].contourf(DMDmode[:,:,ModeNum].real) ax[0].set_title('Real part') ax[0].set_xlabel('X') ax[0].set_ylabel('Y') - ax[1].contourf(DMDmode_list[case][:,:,ModeNum].imag) + ax[1].contourf(DMDmode[:,:,ModeNum].imag) ax[1].set_title('Imaginary part') ax[1].set_xlabel('X') ax[1].set_ylabel('Y') @@ -722,12 +744,12 @@ def animate(i): if TimePos==4: fig, ax = plt.subplots(1, 2, num=f'CLOSE TO CONTINUE RUN - DMD mode') fig.suptitle(f'Case {case + 1} - DMDmode - Component {ModComp+1} Mode Number {ModeNum+1}') - ax[0].contourf(DMDmode_list[case][ModComp,:,:,ModeNum].real) + ax[0].contourf(DMDmode[ModComp,:,:,ModeNum].real) ax[0].set_title('Real part') ax[0].set_xlabel('X') ax[0].set_ylabel('Y') - ax[1].contourf(DMDmode_list[case][ModComp,:,:,ModeNum].imag) + ax[1].contourf(DMDmode[ModComp,:,:,ModeNum].imag) ax[1].set_title('Imaginary part') ax[1].set_xlabel('X') ax[1].set_ylabel('Y') @@ -737,12 +759,12 @@ def animate(i): nz = int(Tensor.shape[3] / 2) fig, ax = plt.subplots(1, 2, num=f'CLOSE TO CONTINUE RUN - DMD mode XY plane') fig.suptitle(f'Case {case + 1} - DMDmode XY plane - Component {ModComp+1} Mode Number {ModeNum+1}') - ax[0].contourf(DMDmode_list[case][ModComp,:,:,nz,ModeNum].real) + ax[0].contourf(DMDmode[ModComp,:,:,nz,ModeNum].real) ax[0].set_title('Real part - XY Plane') ax[0].set_xlabel('X') ax[0].set_ylabel('Y') - ax[1].contourf(DMDmode_list[case][ModComp,:,:,nz,ModeNum].imag) + ax[1].contourf(DMDmode[ModComp,:,:,nz,ModeNum].imag) ax[1].set_title('Imaginary part - XY Plane') ax[1].set_xlabel('X') ax[1].set_ylabel('Y') @@ -803,15 +825,15 @@ def animate(i): plane = input('Select a plane (XY, XZ, YZ)') if plane.strip().lower() == 'xy': Tensor = Tensor[:, :, :, nz, :] - Tensor_rec_list[case] = Tensor_rec_list[case][:, :, :, nz, :] + TensorReconst = TensorReconst[:, :, :, nz, :] break elif plane.strip().lower() == 'xz': Tensor = Tensor[:, :, 0, :, :] - Tensor_rec_list[case] = Tensor_rec_list[case][:, :, 0, :, :] + TensorReconst = TensorReconst[:, :, 0, :, :] break elif plane.strip().lower() == 'yz': Tensor = Tensor[:, 0, :, :, :] - Tensor_rec_list[case] = Tensor_rec_list[case][:, 0, :, :, :] + TensorReconst = TensorReconst[:, 0, :, :, :] break else: print('\tError: Select a valid plane\n') @@ -828,13 +850,13 @@ def animate(i): if not vidvel: vel = 0 video(Tensor, vel, Title = f'Original Data - {titles[vel]}') - video(Tensor_rec_list[case], vel, Title = f'Reconstructed data - {titles[vel]}') + video(TensorReconst, vel, Title = f'Reconstructed data - {titles[vel]}') break elif vidvel.isdigit(): if int(vidvel) <= Tensor.shape[0]: vel = int(vidvel) - 1 video(Tensor, vel, Title = f'Original Data - {titles[vel]}') - video(Tensor_rec_list[case], vel, Title = f'Reconstructed data - {titles[vel]}') + video(TensorReconst, vel, Title = f'Reconstructed data - {titles[vel]}') break else: print("\tError: Select a valid component\n") @@ -850,7 +872,7 @@ def animate(i): if int(vidvel) <= Tensor.shape[0]: vel = int(vidvel) - 1 video(Tensor, vel, Title = f'Original Data - {titles[vel]}') - video(Tensor_rec_list[case], vel, Title 
= f'Reconstructed data - {titles[vel]}') + video(TensorReconst, vel, Title = f'Reconstructed data - {titles[vel]}') break else: print("\tError: Select a valid component\n") diff --git a/v0.2_ModelFLOWs_app/mdHODMD_control.py b/v0.2_ModelFLOWs_app/mdHODMD_control.py new file mode 100644 index 0000000..4e71949 --- /dev/null +++ b/v0.2_ModelFLOWs_app/mdHODMD_control.py @@ -0,0 +1,490 @@ +def HODMDsens(): + import DMDd + import hosvd + import warnings + warnings.filterwarnings("ignore", message="Casting complex values to real discards the imaginary part") + + import os + os.environ['KMP_DUPLICATE_LIB_OK']='True' + import numpy as np + import matplotlib.pyplot as plt + import data_load + import hdf5storage + import time + timestr = time.strftime("%Y-%m-%d_%H.%M.%S") + + def is_float(string): + try: + float(string) + return True + except ValueError: + return False + + print('HODMD for control' + '\n') + print('-----------------------------') + + print(''' +The HODMD algorithm for flow control needs a prior calibration of HODMD. + ''') + + # Load data + path0 = os.getcwd() + print('Inputs:' + '\n') + + while True: + filetype = input('Select the input file format (.mat, .npy, .csv, .pkl, .h5): ') + if filetype.strip().lower() in ['mat', '.mat', 'npy', '.npy', 'csv', '.csv', 'pkl', '.pkl', 'h5', '.h5']: + break + else: + print('\tError: The selected input file format is not supported\n') + + Tensor, _ = data_load.main(filetype) + + + ##Number of snapshots SNAP: + while True: + SNAP = input(f'Introduce number of snapshots. Continue with {Tensor.shape[-1]} snapshots: ') + if not SNAP: + SNAP = int(Tensor.shape[-1]) + break + elif SNAP.isdigit(): + SNAP = int(SNAP) + break + else: + print('\tError: Select a valid number (must be integer)\n') + + ## d Parameter: + while True: + d = input(f'Introduce number of HODMD windows (d): ') + if d.isdigit(): + d = int(d) + break + else: + print('\tError: Select a valid number (must be integer)\n') + + + # SVD Tolerance + while True: + varepsilon1 = input('Introduce SVD tolerance. Continue with 1e-3: ') + if not varepsilon1: + varepsilon1 = 1e-3 + break + elif is_float(varepsilon1): + varepsilon1 = float(varepsilon1) + break + else: + print('\tError:Please introduce a number\n') + + varepsilon2 = varepsilon1 + + + ##Time: + while True: + deltaT = input('Introduce time step (deltaT). Continue with 1: ') + if not deltaT: + deltaT = 1 + break + elif is_float(deltaT): + deltaT = float(deltaT) + break + else: + print('\tError: Please introduce a number\n') + + + Time = np.linspace(0,SNAP-1,num=SNAP)*deltaT + + ## Position of temporal variable: + TimePos = Tensor.ndim + + #-------------------------------------------------------------------------------------------------------------------------------------- + ################### Output ################### + + print('\n-----------------------------') + print('HODMD for flow control summary:') + print('\n' + f'Number of snapshots set at: {SNAP}') + print(f'd Parameter(s) set at: {d}') + print(f'SVD tolerance(s) {varepsilon1}') + print(f'HODMD tolerance(s): {varepsilon2}') + print(f'Time gradient set at deltaT: {deltaT}') + + print('\n-----------------------------') + print('Outputs:\n') + + filen = input('Enter folder name to save the outputs or continue with default folder name: ') + if not filen: + filen = f'{timestr}_HODMDcontrol_solution' + else: + filen = f'{filen}' + + while True: + decision_2 = input('Select format of saved files (.mat, .npy). 
Continue with ".npy": ') + if not decision_2 or decision_2.strip().lower() in ['mat', '.mat', 'npy', '.npy']: + break + else: + print('\tError: Please select a valid output format\n') + + print('') + + # LOAD MESH in format (3, nx, ny ,nz) or (2, nx, ny) + while True: + decision_3 = input('Do you want to plot the non linear sensitivity? (y/n)') + if not decision_3 or decision_3.strip().lower() in ['yes', 'y']: + while True: + filetype = input('Select the input file format for the mesh (.mat, .npy, .csv, .pkl, .h5): ') + if filetype.strip().lower() in ['mat', '.mat', 'npy', '.npy', 'csv', '.csv', 'pkl', '.pkl', 'h5', '.h5']: + break + else: + print('\tError: The selected input file format is not supported\n') + Malla, _ = data_load.main(filetype) + break + elif decision_3.strip().lower() in ['no', 'n']: + break + else: + print('\tError: Please select a valid answer\n') + + if not os.path.exists(f'{path0}/{filen}'): + os.mkdir(f"{path0}/{filen}") + + ## Save results in folder: + + Tensor0 = Tensor + shapeTens = list(np.shape(Tensor)) + shapeTens[-1] = SNAP + Tensor = np.zeros(shapeTens) + + Tensor[..., :] = Tensor0[..., 0:SNAP] + TensorR = Tensor.copy() + + ## ALGORITHM: + + ## ITERATIVE: + nn0 = np.shape(Tensor) + nn = np.array(nn0) + nn[1:np.size(nn)] = 0 + + Frequencies = [] + Amplitudes = [] + GrowthRates = [] + d_val = [] + tol_val = [] + DMDmode_list = [] + Tensor_rec_list = [] + + print(f'\nRunning HODMD for control for d = {d} and tol = {varepsilon1}') + + for iter in range(3): + + + if not os.path.exists(f'{path0}/{filen}/Step_{iter}'): + os.mkdir(f"{path0}/{filen}/Step_{iter}") + if not os.path.exists(f'{path0}/{filen}/Step_{iter}/DMDmodes'): + os.mkdir(f"{path0}/{filen}/Step_{iter}/DMDmodes") + + if iter == 1: + Frequencies = [] + Amplitudes = [] + GrowthRates = [] + d_val = [] + tol_val = [] + DMDmode_list = [] + Tensor_rec_list = [] + TensorR = [] + if not decision_2 or decision_2.strip().lower() in ['npy', '.npy']: + TensorR = data_load.load_data('npy',f'{path0}/{filen}/Step_0/TensorReconst.npy') + elif decision_2.strip().lower() in ['.mat', 'mat']: + TensorR = data_load.load_data('mat',f'{path0}/{filen}/Step_0/TensorReconst.mat') + elif iter == 2: + Frequencies = [] + Amplitudes = [] + GrowthRates = [] + d_val = [] + tol_val = [] + DMDmode_list = [] + Tensor_rec_list = [] + TensorR = [] + TensorReconst = [] + + if not decision_2 or decision_2.strip().lower() in ['npy', '.npy']: + Tensor1 = data_load.load_data('npy',f'{path0}/{filen}/Step_0/TensorReconst.npy') + elif decision_2.strip().lower() in ['.mat', 'mat']: + Tensor1 = data_load.load_data('mat',f'{path0}/{filen}/Step_0/TensorReconst.mat') + Media = np.mean(Tensor1,axis=Tensor1.ndim-1) + TensorR = np.zeros(Tensor1.shape) + + for nsnap in range(SNAP): + TensorR[...,nsnap] = Tensor1[...,(SNAP-1)-nsnap] - Media + + Media = [] + + ## Perform HOSVD decomposition to calculate the reduced temporal matrix: hatT + nnin = nn + nnin = tuple(nnin) + print('\nPerforming HOSVD. Please wait...\n') + [hatT,U,S,sv,nn1,n,TT] = hosvd.HOSVD(TensorR,varepsilon1,nn,nn0,TimePos) + print('\nHOSVD complete!\n') + # hatT: Reduced temporal matrix + + ## Perform HODMD to the reduced temporal matrix hatT: + print('Performing HODMD. 
+        ## Apply HODMD to the reduced temporal matrix hatT:
+        print('Performing HODMD. Please wait...\n')
+        [hatMode, Amplitude, Eigval, GrowthRate, Frequency] = DMDd.hodmd_IT(hatT, d, Time, varepsilon1, varepsilon2)
+        print('\nHODMD complete!\n')
+
+        ## Reconstruct the original tensor using the DMD expansion:
+        TensorReconst = DMDd.reconst_IT(hatMode, Time, U, S, sv, nn1, TimePos, GrowthRate, Frequency)
+
+        RRMSE = np.linalg.norm(np.reshape(Tensor-TensorReconst, newshape=(np.size(Tensor), 1)), ord=2)/np.linalg.norm(np.reshape(Tensor, newshape=(np.size(Tensor), 1)))
+        print(f'Relative root mean square error (RRMSE) of the reconstruction: {np.round(RRMSE*100, 3)}%\n')
+
+        GRFreqAmp = np.zeros((np.size(GrowthRate), 3))
+        for ind in range(np.size(GrowthRate)):
+            GRFreqAmp[ind, 0] = GrowthRate[ind]
+            GRFreqAmp[ind, 1] = Frequency[ind]
+            GRFreqAmp[ind, 2] = Amplitude[ind]
+
+        print('Growth rate, frequency and amplitude of each mode:')
+        print(GRFreqAmp)
+
+        Frequencies.append(Frequency)
+        Amplitudes.append(Amplitude)
+        GrowthRates.append(GrowthRate)
+        d_val.append(d)
+        tol_val.append(varepsilon1)
+
+        if not decision_2 or decision_2.strip().lower() in ['npy', '.npy']:
+            np.save(f'{path0}/{filen}/Step_{step}/GRFreqAmp.npy', GRFreqAmp)
+
+        if decision_2.strip().lower() in ['.mat', 'mat']:
+            mdic = {"GRFreqAmp": GRFreqAmp}
+            file_mat = f'{path0}/{filen}/Step_{step}/GRFreqAmp.mat'
+            hdf5storage.savemat(file_mat, mdic, appendmat=True, format='7.3')
+
+        # Tensor reconstruction (at Step 0, keep only the three leading modes)
+        if step == 0:
+            TensorReconst = DMDd.reconst_IT(hatMode[:, :3], Time, U, S, sv, nn1, TimePos, GrowthRate[:3], Frequency[:3])
+        else:
+            TensorReconst = DMDd.reconst_IT(hatMode, Time, U, S, sv, nn1, TimePos, GrowthRate, Frequency)
+        Tensor_rec_list.append(TensorReconst)
+
+        ## Save the tensor reconstruction and the growth rates, frequencies and amplitudes:
+
+        # Reconstruction:
+        if not decision_2 or decision_2.strip().lower() in ['npy', '.npy']:
+            np.save(f'{path0}/{filen}/Step_{step}/TensorReconst.npy', TensorReconst)
+
+        if decision_2.strip().lower() in ['.mat', 'mat']:
+            mdic = {"TensorReconst": TensorReconst}
+            file_mat = f'{path0}/{filen}/Step_{step}/TensorReconst.mat'
+            hdf5storage.savemat(file_mat, mdic, appendmat=True, format='7.3')
+
+        ## Calculate the DMD modes:
+        print('Calculating DMD modes...')
+        N = np.shape(hatT)[0]
+        DMDmode = DMDd.modes_IT(N, hatMode, Amplitude, U, S, nn1, TimePos)
+        DMDmode_list.append(DMDmode)
+
+        # Save the DMD modes:
+        if not decision_2 or decision_2.strip().lower() in ['npy', '.npy']:
+            np.save(f'{path0}/{filen}/Step_{step}/DMDmode.npy', DMDmode)
+
+        if decision_2.strip().lower() in ['.mat', 'mat']:
+            mdic = {"DMDmode": DMDmode}
+            file_mat = f'{path0}/{filen}/Step_{step}/DMDmode.mat'
+            hdf5storage.savemat(file_mat, mdic, appendmat=True, format='7.3')
+
+        print(f'\nSaving the first 2 DMD mode plots to {path0}/{filen}/Step_{step}/DMDmodes\n')
+
+        if TimePos == 3:
+            for ModeNum in range(2):
+                fig, ax = plt.subplots(1, 2)
+                fig.suptitle(f'DMDmode - Mode Number {ModeNum+1}')
+                ax[0].contourf(DMDmode[:, :, ModeNum].real)
+                ax[0].set_title('Real part')
+                ax[0].set_xlabel('X')
+                ax[0].set_ylabel('Y')
+
+                ax[1].contourf(DMDmode[:, :, ModeNum].imag)
+                ax[1].set_title('Imaginary part')
+                ax[1].set_xlabel('X')
+                ax[1].set_ylabel('Y')
+                plt.savefig(f'{path0}/{filen}/Step_{step}/DMDmodes/ModeNum_{ModeNum+1}.png')
+                plt.close(fig)
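+        # Mode-array layout assumed by the plots below (inferred from the indexing;
+        # illustrative only):
+        #   TimePos == 3 -> DMDmode[x, y, mode]            (2D scalar field)
+        #   TimePos == 4 -> DMDmode[comp, x, y, mode]      (2D vector field)
+        #   TimePos == 5 -> DMDmode[comp, x, y, z, mode]   (3D vector field)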
+        for ModComp in range(DMDmode.shape[0]):
+            for ModeNum in range(2):
+                if TimePos == 4:
+                    fig, ax = plt.subplots(1, 2)
+                    fig.suptitle(f'DMDmode - Component {ModComp+1} Mode Number {ModeNum+1}')
+                    ax[0].contourf(DMDmode[ModComp, :, :, ModeNum].real)
+                    ax[0].set_title('Real part')
+                    ax[0].set_xlabel('X')
+                    ax[0].set_ylabel('Y')
+
+                    ax[1].contourf(DMDmode[ModComp, :, :, ModeNum].imag)
+                    ax[1].set_title('Imaginary part')
+                    ax[1].set_xlabel('X')
+                    ax[1].set_ylabel('Y')
+                    plt.savefig(f'{path0}/{filen}/Step_{step}/DMDmodes/DMDmodeComp_{ModComp+1}_ModeNum_{ModeNum+1}.png')
+                    plt.close(fig)
+
+                elif TimePos == 5:
+                    # Plot the XY mid-plane of the 3D mode
+                    nz = int(Tensor.shape[3] / 2)
+                    fig, ax = plt.subplots(1, 2, num='CLOSE TO CONTINUE RUN - DMD mode XY plane')
+                    fig.suptitle(f'DMDmode XY plane - Component {ModComp+1} Mode Number {ModeNum+1}')
+                    ax[0].contourf(DMDmode[ModComp, :, :, nz, ModeNum].real)
+                    ax[0].set_title('Real part - XY Plane')
+                    ax[0].set_xlabel('X')
+                    ax[0].set_ylabel('Y')
+
+                    ax[1].contourf(DMDmode[ModComp, :, :, nz, ModeNum].imag)
+                    ax[1].set_title('Imaginary part - XY Plane')
+                    ax[1].set_xlabel('X')
+                    ax[1].set_ylabel('Y')
+                    plt.savefig(f'{path0}/{filen}/Step_{step}/DMDmodes/DMDmode_XY_Comp_{ModComp+1}_ModeNum_{ModeNum+1}.png')
+                    plt.close(fig)
+                else:
+                    pass
+
+    if not decision_3 or decision_3.strip().lower() in ['yes', 'y']:
+        # Direct modes (Step 1) and adjoint-like modes (Step 2) for the sensitivity
+        if not decision_2 or decision_2.strip().lower() in ['npy', '.npy']:
+            ModesD = data_load.load_data('npy', f'{path0}/{filen}/Step_1/DMDmode.npy')
+            ModesA = data_load.load_data('npy', f'{path0}/{filen}/Step_2/DMDmode.npy')
+            GRFA = data_load.load_data('npy', f'{path0}/{filen}/Step_1/GRFreqAmp.npy')
+        elif decision_2.strip().lower() in ['.mat', 'mat']:
+            ModesD = data_load.load_data('mat', f'{path0}/{filen}/Step_1/DMDmode.mat')
+            ModesA = data_load.load_data('mat', f'{path0}/{filen}/Step_2/DMDmode.mat')
+            GRFA = data_load.load_data('mat', f'{path0}/{filen}/Step_1/GRFreqAmp.mat')
+
+        mode0 = ModesD[..., 0]
+        moded = ModesD[..., 1]
+        modea = ModesA[..., 0]
+
+        if TimePos == 4:
+            # Detect along which array axis each spatial coordinate varies
+            vector_x = Malla[0, 0, :]
+            vector_y = Malla[1, :, 0]
+            if vector_x[0] == vector_x[1]:
+                vector_x = Malla[0, :, 0]
+                vector_y = Malla[1, 0, :]
+
+            grad0 = np.gradient(mode0, vector_x, vector_y, axis=(1, 2))
+            grad0 = np.array(grad0).transpose((1, 0, 2, 3))
+
+            # Part1 = (mode0 . grad) mode0, the convective term of the first mode
+            Part1 = np.zeros(Malla.shape, dtype=np.complex128)
+            Part1[0, :, :] = mode0[0, :, :]*grad0[0, 0, :, :]+mode0[1, :, :]*grad0[0, 1, :, :]
+            Part1[1, :, :] = mode0[0, :, :]*grad0[1, 0, :, :]+mode0[1, :, :]*grad0[1, 1, :, :]
+
+            ff1 = np.linalg.norm(Part1, axis=0)
+
+            gradd = np.gradient(moded, vector_x, vector_y, axis=(1, 2))
+            gradd = np.array(gradd).transpose((1, 0, 2, 3))
+
+            PartD = np.zeros(Malla.shape, dtype=np.complex128)
+            PartD[0, :, :] = moded[0, :, :]*gradd[0, 0, :, :]+moded[1, :, :]*gradd[0, 1, :, :]
+            PartD[1, :, :] = moded[0, :, :]*gradd[1, 0, :, :]+moded[1, :, :]*gradd[1, 1, :, :]
+
+            grada = np.gradient(modea, vector_x, vector_y, axis=(1, 2))
+            grada = np.array(grada).transpose((1, 0, 2, 3))
+
+            PartA = np.zeros(Malla.shape, dtype=np.complex128)
+            PartA[0, :, :] = modea[0, :, :]*grada[0, 0, :, :]+modea[1, :, :]*grada[0, 1, :, :]
+            PartA[1, :, :] = modea[0, :, :]*grada[1, 0, :, :]+modea[1, :, :]*grada[1, 1, :, :]
+
+            ff2 = np.sqrt((abs(PartD[0, :, :])**2+abs(PartD[1, :, :])**2)*(abs(PartA[0, :, :])**2+abs(PartA[1, :, :])**2))
+
+            NLSens = np.abs(GRFA[1, 1])*np.real(ff1)/np.amax(np.real(ff1)) + 2*ff2/np.amax(ff2)
+
+            fig = plt.figure()
+            plt.contourf(Malla[0, :, :], Malla[1, :, :], np.real(ff1)/np.amax(np.real(ff1)))
+            plt.title('||M||*||M||')
+            plt.savefig(f'{path0}/{filen}/MM.png')
+            plt.close(fig)
+
+            fig = plt.figure()
+            plt.contourf(Malla[0, :, :], Malla[1, :, :], ff2/np.amax(ff2))
+            plt.title('||N||*||N*||')
+            plt.savefig(f'{path0}/{filen}/NN.png')
+            plt.close(fig)
+
+            fig = plt.figure()
+            plt.contourf(Malla[0, :, :], Malla[1, :, :], NLSens)
+            plt.savefig(f'{path0}/{filen}/NLSens.png')
+            plt.close(fig)
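+            # Illustrative note on locating the maximum: np.argwhere returns every
+            # (i, j) index attaining the maximum, and index[0] takes the first one.
+            # For example (hypothetical values),
+            #   NLSens = np.array([[0., 1.], [3., 2.]])
+            # gives np.argwhere(NLSens == np.amax(NLSens)) == [[1, 0]], so the
+            # maximum would be reported at grid point (i, j) = (1, 0).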
+            index = np.argwhere(NLSens == np.amax(NLSens))
+            X_index = Malla[0, index[0, 0], index[0, 1]]
+            Y_index = Malla[1, index[0, 0], index[0, 1]]
+            print(f'The maximum value of the sensitivity is located at: X = {X_index} Y = {Y_index}')
+
+        elif TimePos == 5:
+            # Detect along which array axis each spatial coordinate varies
+            vector_x = Malla[0, 0, 0, :]
+            if vector_x[0] == vector_x[1]:
+                vector_x = Malla[0, 0, :, 0]
+                if vector_x[0] == vector_x[1]:
+                    vector_x = Malla[0, :, 0, 0]
+                    vector_y = Malla[1, 0, 0, :]
+                    vector_z = Malla[2, 0, :, 0]
+                    if vector_y[0] == vector_y[1]:
+                        vector_y = Malla[1, 0, :, 0]
+                        vector_z = Malla[2, 0, 0, :]
+                else:
+                    vector_y = Malla[1, 0, 0, :]
+                    vector_z = Malla[2, :, 0, 0]
+                    if vector_y[0] == vector_y[1]:
+                        vector_y = Malla[1, :, 0, 0]
+                        vector_z = Malla[2, 0, 0, :]
+            else:
+                vector_y = Malla[1, 0, :, 0]
+                vector_z = Malla[2, :, 0, 0]
+                if vector_y[0] == vector_y[1]:
+                    vector_y = Malla[1, :, 0, 0]
+                    vector_z = Malla[2, 0, :, 0]
+
+            grad0 = np.gradient(mode0, vector_x, vector_y, vector_z, axis=(1, 2, 3))
+            grad0 = np.array(grad0).transpose((1, 0, 2, 3, 4))
+
+            Part1 = np.zeros(Malla.shape, dtype=np.complex128)
+            Part1[0, :, :] = mode0[0, :, :]*grad0[0, 0, :, :]+mode0[1, :, :]*grad0[0, 1, :, :]+mode0[2, :, :]*grad0[0, 2, :, :]
+            Part1[1, :, :] = mode0[0, :, :]*grad0[1, 0, :, :]+mode0[1, :, :]*grad0[1, 1, :, :]+mode0[2, :, :]*grad0[1, 2, :, :]
+            Part1[2, :, :] = mode0[0, :, :]*grad0[2, 0, :, :]+mode0[1, :, :]*grad0[2, 1, :, :]+mode0[2, :, :]*grad0[2, 2, :, :]
+            ff1 = np.linalg.norm(Part1, axis=0)
+
+            gradd = np.gradient(moded, vector_x, vector_y, vector_z, axis=(1, 2, 3))
+            gradd = np.array(gradd).transpose((1, 0, 2, 3, 4))
+
+            PartD = np.zeros(Malla.shape, dtype=np.complex128)
+            PartD[0, :, :] = moded[0, :, :]*gradd[0, 0, :, :]+moded[1, :, :]*gradd[0, 1, :, :]+moded[2, :, :]*gradd[0, 2, :, :]
+            PartD[1, :, :] = moded[0, :, :]*gradd[1, 0, :, :]+moded[1, :, :]*gradd[1, 1, :, :]+moded[2, :, :]*gradd[1, 2, :, :]
+            PartD[2, :, :] = moded[0, :, :]*gradd[2, 0, :, :]+moded[1, :, :]*gradd[2, 1, :, :]+moded[2, :, :]*gradd[2, 2, :, :]
+
+            grada = np.gradient(modea, vector_x, vector_y, vector_z, axis=(1, 2, 3))
+            grada = np.array(grada).transpose((1, 0, 2, 3, 4))
+
+            PartA = np.zeros(Malla.shape, dtype=np.complex128)
+            PartA[0, :, :] = modea[0, :, :]*grada[0, 0, :, :]+modea[1, :, :]*grada[0, 1, :, :]+modea[2, :, :]*grada[0, 2, :, :]
+            PartA[1, :, :] = modea[0, :, :]*grada[1, 0, :, :]+modea[1, :, :]*grada[1, 1, :, :]+modea[2, :, :]*grada[1, 2, :, :]
+            PartA[2, :, :] = modea[0, :, :]*grada[2, 0, :, :]+modea[1, :, :]*grada[2, 1, :, :]+modea[2, :, :]*grada[2, 2, :, :]
+
+            ff2 = np.sqrt((abs(PartD[0, :, :, :])**2+abs(PartD[1, :, :, :])**2+abs(PartD[2, :, :, :])**2)*(abs(PartA[0, :, :, :])**2+abs(PartA[1, :, :, :])**2+abs(PartA[2, :, :, :])**2))
+            NLSens = np.abs(GRFA[1, 1])*np.real(ff1)/np.amax(np.real(ff1)) + 2*ff2/np.amax(ff2)
+
+            # Plot the XY mid-plane of the sensitivity field
+            nz_mid = int(Malla.shape[3] / 2)
+            fig = plt.figure()
+            plt.contourf(Malla[0, :, :, nz_mid], Malla[1, :, :, nz_mid], NLSens[:, :, nz_mid])
+            plt.savefig(f'{path0}/{filen}/NLSens.png')
+            plt.close(fig)
+
+            index = np.argwhere(NLSens == np.amax(NLSens))
+            X_index = Malla[0, index[0, 0], index[0, 1], index[0, 2]]
+            Y_index = Malla[1, index[0, 0], index[0, 1], index[0, 2]]
+            Z_index = Malla[2, index[0, 0], index[0, 1], index[0, 2]]
+            print(f'The maximum value of the sensitivity is located at: X = {X_index} Y = {Y_index} Z = {Z_index}')
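+        # Hedged reading of the quantity saved below, as assembled above (the
+        # naming is illustrative, not the library's): with u0 = mode0, ud = moded
+        # and ua = modea,
+        #   ff1 = ||(u0 . grad) u0||,  ff2 = ||(ud . grad) ud|| * ||(ua . grad) ua||
+        # evaluated pointwise, and
+        #   NLSens = |omega| * ff1/max(ff1) + 2 * ff2/max(ff2)
+        # where omega = GRFA[1, 1] is the frequency of the second Step-1 mode.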
+        if not decision_2 or decision_2.strip().lower() in ['npy', '.npy']:
+            np.save(f'{path0}/{filen}/NLSens.npy', NLSens)
+
+        if decision_2.strip().lower() in ['.mat', 'mat']:
+            mdic = {"NLSens": NLSens}
+            file_mat = f'{path0}/{filen}/NLSens.mat'
+            hdf5storage.savemat(file_mat, mdic, appendmat=True, format='7.3')
\ No newline at end of file
diff --git a/v0.1_ModelFLOWs_app/mdHODMD_pred.py b/v0.2_ModelFLOWs_app/mdHODMD_pred.py
similarity index 100%
rename from v0.1_ModelFLOWs_app/mdHODMD_pred.py
rename to v0.2_ModelFLOWs_app/mdHODMD_pred.py