#!/usr/bin/env python

## \file write_SU2_MLP.py
#  \brief Python script for translating a trained Tensorflow model
#         to an SU2 MLP input file.
#  \author E.C.Bunschoten
#  \version 7.5.0 "Blackbird"
#
# SU2 Project Website: https://su2code.github.io
#
# The SU2 Project is maintained by the SU2 Foundation
# (http://su2foundation.org)
#
# Copyright 2012-2022, SU2 Contributors (cf. AUTHORS.md)
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.

def write_SU2_MLP(file_out, input_names, output_names, model,
                  input_min=(), input_max=(), output_min=(), output_max=()):
    """Translate a trained tensorflow.keras MLP into an SU2 ".mlp" input file.

    Only ``model.get_config()`` and ``model.layers[i].get_weights()`` are used,
    so any object exposing that subset of the Keras API is accepted.

    Parameters
    ----------
    file_out : str
        Output file name without extension; ".mlp" is appended.
    input_names : list of str
        Variable names of the MLP input(s).
    output_names : list of str
        Variable names of the MLP output(s).
    model : tensorflow.keras.Model
        The trained model.
    input_min, input_max : sequence of float, optional
        Lower/upper normalization values for the inputs (all or none).
    output_min, output_max : sequence of float, optional
        Lower/upper normalization values for the outputs (all or none).

    Raises
    ------
    Exception
        If the provided names or normalization ranges are inconsistent
        with the number of inputs/outputs of the model.
    """
    # MLP config
    model_config = model.get_config()

    # Number of input variables in the model
    n_inputs = model_config['layers'][0]['config']['batch_input_shape'][1]
    # Number of output variables in the model
    n_outputs = model_config['layers'][-1]['config']['units']

    # Checking if number of provided input and output names are equal to those in the model
    if n_inputs != len(input_names):
        raise Exception("Number of provided input names unequal to the number of inputs in the model")
    if n_outputs != len(output_names):
        raise Exception("Number of provided output names unequal to the number of outputs in the model")

    if len(input_max) != len(input_min):
        raise Exception("Upper and lower input normalizations should have the same length")
    if len(output_max) != len(output_min):
        raise Exception("Upper and lower output normalizations should have the same length")

    if len(input_max) > 0 and len(input_min) != n_inputs:
        raise Exception("Input normalization not provided for all inputs")
    if len(output_max) > 0 and len(output_min) != n_outputs:
        raise Exception("Output normalization not provided for all outputs")

    n_layers = len(model_config['layers'])

    # "with" guarantees the file is closed even if a write raises.
    with open(file_out + '.mlp', 'w+') as fid:
        fid.write("<header>\n\n")

        # Writing number of neurons per layer: the input shape for the input
        # layer, "units" for hidden layers, and the output count last.
        fid.write('[number of layers]\n%i\n\n' % n_layers)
        fid.write('[neurons per layer]\n')
        activation_functions = []
        for iLayer in range(n_layers - 1):
            layer_config = model_config['layers'][iLayer]
            if layer_config['class_name'] == 'InputLayer':
                # Input layers expose their size via the batch input shape
                # and have no activation of their own.
                activation_functions.append('linear')
                n_neurons = layer_config['config']['batch_input_shape'][1]
            else:
                activation_functions.append(layer_config['config']['activation'])
                n_neurons = layer_config['config']['units']
            fid.write('%i\n' % n_neurons)
        fid.write('%i\n' % n_outputs)

        # The output (regression) layer is always linear.
        activation_functions.append('linear')

        # Writing the activation function for each layer
        fid.write('\n[activation function]\n')
        for activation in activation_functions:
            fid.write(activation + '\n')

        # Writing the input and output names
        fid.write('\n[input names]\n')
        for name in input_names:
            fid.write(name + '\n')

        if len(input_min) > 0:
            fid.write('\n[input normalization]\n')
            for lower, upper in zip(input_min, input_max):
                fid.write('%+.16e\t%+.16e\n' % (lower, upper))

        fid.write('\n[output names]\n')
        for name in output_names:
            fid.write(name + '\n')

        if len(output_min) > 0:
            fid.write('\n[output normalization]\n')
            for lower, upper in zip(output_min, output_max):
                fid.write('%+.16e\t%+.16e\n' % (lower, upper))

        fid.write("\n</header>\n")

        # Writing the weight matrix of each trainable layer, one row per line.
        fid.write('\n[weights per layer]\n')
        for layer in model.layers:
            fid.write('<layer>\n')
            for row in layer.get_weights()[0]:
                fid.write("\t".join(f'{w:+.16e}' for w in row) + "\n")
            fid.write('</layer>\n')

        # Writing the biases of each layer. The input layer has no trainable
        # biases, so one zero is written per input neuron. (Previously three
        # zeros were hard-coded, which is wrong for any model whose number of
        # inputs differs from three.)
        fid.write('\n[biases per layer]\n')
        fid.write("\t".join(['%+.16e' % 0.0] * n_inputs) + "\n")
        for layer in model.layers:
            biases = layer.get_weights()[1]
            fid.write("\t".join(f'{b:+.16e}' for b in biases) + "\n")