
Commit e7b3a57

Merge pull request #33 from pchris2205/main
Add chip cooling case of Master's thesis
2 parents e51c98f + e5a0602 commit e7b3a57

24 files changed: +2288 -0 lines changed
Lines changed: 373 additions & 0 deletions
@@ -0,0 +1,373 @@
#!/usr/bin/env python
# coding: utf-8

# In[ ]:


import sys
sys.path.append("wsl.localhost/Ubuntu-24.04/home/pchris2205/pip3/lib/python3.12/site-packages/")
from PyFoam.RunDictionary.ParsedParameterFile import ParsedParameterFile
from smartredis import Client
import skopt
import subprocess
import numpy as np
import tensorflow as tf
import torch
import gpytorch
import botorch
import matplotlib.pyplot as plt
plt.style.use("bmh")
from tqdm.notebook import tqdm
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
import os
os.environ['SMARTSIM_LOG_LEVEL'] = 'quiet'
import fluidfoam
import warnings
from botorch.utils.multi_objective.pareto import is_non_dominated
from botorch.utils.multi_objective.box_decompositions.dominated import DominatedPartitioning
from botorch.utils.multi_objective.box_decompositions.non_dominated import FastNondominatedPartitioning
from botorch.models.model_list_gp_regression import ModelListGP

from botorch.acquisition.analytic import ExpectedImprovement
from botorch.acquisition.multi_objective.analytic import ExpectedHypervolumeImprovement

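# Overview: multi-objective Bayesian optimization of a chip-cooling geometry.
# SmartSim launches parallel OpenFOAM (simpleFoam) ensemble runs, and BoTorch's
# Expected Hypervolume Improvement proposes new geometry parameters that trade
# off turbulent kinetic energy against pressure drop.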
# In[ ]:


param_names = [
    'y_0',
    'phi1',
    'phi2',
    'a1',
    'a2',
    'n1',
    'n2',
    'm',
    'c'
]
nparams = len(param_names)
x_range_y_0 = np.linspace(0., 70., 71).astype(np.float64)
x_range_phi1 = np.linspace(0., 90., 19).astype(np.float64)
x_range_phi2 = np.linspace(0., 90., 19).astype(np.float64)
x_range_a1 = np.linspace(50., 200., 16).astype(np.float64)
x_range_a2 = np.linspace(50., 200., 16).astype(np.float64)
x_range_n1 = np.linspace(1., 10., 10).astype(np.float64)
x_range_n2 = np.linspace(1., 10., 10).astype(np.float64)
x_range_m = np.linspace(1., 10., 10).astype(np.float64)
x_range_c = np.linspace(5., 15., 11).astype(np.float64)
bounds = [x_range_y_0, x_range_phi1, x_range_phi2, x_range_a1, x_range_a2, x_range_n1, x_range_n2, x_range_m, x_range_c]

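# The nine parameters appear to parameterize the cooling-channel geometry.
# Each is restricted to a discrete, evenly spaced grid (e.g. y_0 in
# {0, 1, ..., 70}, phi1/phi2 in 5-degree steps); `bounds` collects the grids
# in the same order as `param_names`.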
# In[ ]:


from smartsim import Experiment
exp = Experiment("GeometryOptimization_MultiObjective_withoutNoise_WithJosipData")
# Read the number of MPI ranks from the system/decomposeParDict dictionary
decompose_par = ParsedParameterFile("chip_cooling_with_new_boundaries/system/decomposeParDict")
num_mpi_ranks = decompose_par['numberOfSubdomains']

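# The Experiment object namespaces every ensemble run below; each OpenFOAM
# case is decomposed across `num_mpi_ranks` subdomains and launched via mpirun.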
# In[ ]:


def upper_confidence_bound(x, gp_model, beta):
    y_pred, y_std = gp_model.predict(x.reshape(-1, 1), return_std=True)
    ucb = y_pred + beta * y_std
    return ucb

def extract_output(model):
    fname = f'{model.path}/postProcessing/k_box/0/volFieldValue.dat'
    data_k_box = np.loadtxt(fname, skiprows=4)
    data_last_k_box = torch.tensor([data_k_box[-1, 1]])
    fname = f'{model.path}/postProcessing/pressure_inlet/0/surfaceFieldValue.dat'
    data_p_inlet = np.loadtxt(fname, skiprows=5)
    data_last_p_inlet = data_p_inlet[-1, 1]
    print(data_last_p_inlet)
    fname = f'{model.path}/postProcessing/pressure_outlet/0/surfaceFieldValue.dat'
    data_p_outlet = np.loadtxt(fname, skiprows=5)
    data_last_p_outlet = data_p_outlet[-1, 1]
    print(data_last_p_outlet)
    data_pressure_drop = torch.tensor([data_last_p_outlet - data_last_p_inlet])
    print(data_pressure_drop)
    data_final = torch.vstack([data_last_k_box, data_pressure_drop]).transpose(-1, -2)
    print(data_final)
    return data_final

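# extract_output reads the last time step of two OpenFOAM function-object
# outputs: the k_box volFieldValue of turbulent kinetic energy and the
# inlet/outlet surfaceFieldValue pressures. Note the sign convention: for a
# pressure-driven flow, outlet minus inlet is negative, so maximizing this
# second objective (BoTorch's default) minimizes the actual pressure drop.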
def evaluate_function_initialise(values, suffix):

    nparams = len(param_names)

    # Build the parameter dictionary from the most recent sample row
    params = {}
    for i in range(nparams):
        params[param_names[i]] = int(values[-1, i])
    print(params)

    rs = exp.create_run_settings(exe="simpleFoam", exe_args="-parallel", run_command="mpirun", run_args={"np": f"{num_mpi_ranks}"})
    ens = exp.create_ensemble("default_simulation" + str(suffix), params=params, perm_strategy='step', run_settings=rs)
    ens.attach_generator_files(to_configure="chip_cooling_with_new_boundaries")
    exp.generate(ens, overwrite=True, tag="!")
    res_allrun_pre = [subprocess.call(['bash', f"{ens_model.path}/Allrun.pre"]) for ens_model in ens.models]
    #print(f'Allrun.pre in chip_cooling executed with return code: {res_allrun_pre}')
    exp.start(ens, block=True)

    outputs = []
    for model in ens.models:
        try:
            outputs.append(extract_output(model))
        except Exception:
            outputs.append(np.nan)

    if len(outputs) == 1:
        return outputs[0]
    else:
        return outputs

# In[ ]:


def evaluate_function(values, suffix):

    nparams = len(param_names)

    # Build the parameter dictionary from the most recent sample row
    params = {}
    for i in range(nparams):
        params[param_names[i]] = int(values[-1, i])
    print(params)
    #rs = exp.create_run_settings(exe="simpleFoam")
    rs = exp.create_run_settings(exe="simpleFoam", exe_args="-parallel", run_command="mpirun", run_args={"np": f"{num_mpi_ranks}"})
    ens = exp.create_ensemble("evaluation" + str(suffix), params=params, perm_strategy='step', run_settings=rs)
    ens.attach_generator_files(to_configure="chip_cooling_with_new_boundaries")
    exp.generate(ens, overwrite=True, tag="!")
    res_allrun_pre = [subprocess.call(['bash', f"{ens_model.path}/Allrun.pre"]) for ens_model in ens.models]
    #print(f'Allrun.pre in chip_cooling executed with return code: {res_allrun_pre}')
    exp.start(ens, block=True)

    outputs = []
    for model in ens.models:
        try:
            outputs.append(extract_output(model))
        except Exception:
            outputs.append(np.nan)

    if len(outputs) == 1:
        return outputs[0]
    else:
        return outputs

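# evaluate_function is identical to evaluate_function_initialise except for
# the ensemble name prefix ("evaluation" vs. "default_simulation"), so the
# initial design and the BO queries land in separate run directories.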
# In[ ]:


class GPModel(gpytorch.models.ExactGP, botorch.models.gpytorch.GPyTorchModel):
    _num_outputs = 1

    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)


def fit_gp_model(train_x, train_y, num_train_iters=500):
    # declare the GP
    #noise = 1e-4

    likelihood = gpytorch.likelihoods.GaussianLikelihood()
    model = GPModel(train_x, train_y, likelihood)
    #model.likelihood.noise = noise

    # train the hyperparameters by maximizing the marginal log likelihood
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)

    model.train()
    likelihood.train()

    for i in range(num_train_iters):
        optimizer.zero_grad()

        output = model(train_x)
        loss = -mll(output, train_y)

        loss.backward()
        optimizer.step()

    model.eval()
    likelihood.eval()

    return model, likelihood

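# One single-output exact GP (constant mean, scaled RBF kernel) is fit per
# objective; the two are later combined in a ModelListGP so EHVI can treat
# them as independent surrogate models.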
# In[ ]:


def adapt_next_x(next_x):
    # Snap each coordinate of the continuous acquisition optimum to the
    # nearest value on that parameter's discrete grid.
    for p in range(nparams):
        arg = float(next_x[0, p])
        squared_difference = tf.math.squared_difference(bounds[p], arg)
        indice = int(tf.math.argmin(squared_difference))
        value_p = torch.tensor(bounds[p][indice]).reshape(1)
        if p == 0:
            next_x_adapted = value_p
        else:
            next_x_adapted = torch.cat([next_x_adapted, value_p], dim=0)
    return next_x_adapted.reshape(1, -1)

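# Illustration (hypothetical values): for y_0 the grid is {0, 1, ..., 70},
# so a proposed 33.4 snaps to 33.0; for phi1 the grid has 5-degree spacing,
# so 47.2 snaps to 45.0.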
# In[ ]:


def plot_hypervolume_k_DeltaP(j):
    # Hypervolume trace over the queries so far
    xyz_x = np.linspace(1, j, j)
    xyz_hyper = hypervolumes[0, :j].reshape(j, 1)
    # Create a plot with a line connecting the points
    plt.plot(xyz_x, xyz_hyper, marker='o', linestyle='-', label='hypervolume')
    # Add labels and title
    plt.xlabel('Nr. of queries')  # Name for the x-axis
    plt.ylabel('hypervolume')  # Name for the y-axis
    plt.title('Growth of hypervolume during multi-objective optimization')
    plt.legend()
    # Save the plot as an SVG file
    #plt.savefig("hypervolume_BO_multiobjective.svg", format='svg')

    # Show the plot (optional)
    plt.show()

    # Objective values of the BO queries (skip the initial samples)
    xyz_k = train_y[num_samples:, 0].reshape(j, 1)
    xyz_p = train_y[num_samples:, 1].reshape(j, 1)

    # Create a figure and a set of subplots
    fig, ax1 = plt.subplots()

    # Plot the first y-axis (left side)
    fig1, = ax1.plot(xyz_x, xyz_k, 'b-', label='turbulent kinetic energy')  # 'b-' means blue line
    ax1.set_xlabel('Nr. of queries')  # X-axis label
    ax1.set_ylabel('turbulent kinetic energy [m²/s²]', color='b')  # Y-axis label for the first y-axis
    ax1.tick_params(axis='y', labelcolor='b')  # Color the y-ticks blue

    # Create a second y-axis (right side)
    ax2 = ax1.twinx()  # Create a twin Axes sharing the x-axis
    fig2, = ax2.plot(xyz_x, xyz_p, 'r-', label='pressure drop')  # 'r-' means red line
    ax2.set_ylabel('pressure drop [Pa]', color='r')  # Y-axis label for the second y-axis
    ax2.tick_params(axis='y', labelcolor='r')  # Color the y-ticks red

    # Add a title and a combined legend for both axes
    plt.title('Plot of pressure drop and turbulent kinetic energy')
    lines = [fig1, fig2]
    labels = [fig1.get_label(), fig2.get_label()]
    ax1.legend(lines, labels)

    # Save the plot as an SVG file
    #plt.savefig("TurbulentKineticEnergy_PressureDrop_BO_multiobjective.svg", format='svg')
    # Show the plot
    plt.show()

# In[ ]:


num_samples = 5
for k in range(num_samples):
    np.random.seed(3 * k)
    # Draw one random value from each parameter's grid (in param_names order)
    default_sample = np.zeros(shape=(1, nparams))
    for i, grid in enumerate(bounds):
        default_sample[:, i] = np.random.choice(grid, size=1)

    default_sample = torch.from_numpy(default_sample)
    if k == 0:
        train_x = default_sample
        train_y = evaluate_function_initialise(train_x, f"_{k}")
    else:
        train_x = torch.cat([train_x, default_sample])
        default_y = evaluate_function_initialise(train_x, f"_{k}")
        train_y = torch.vstack([train_y, default_y])
    print(train_x)
    print(train_y)

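# Five random grid points (seeded for reproducibility) initialize the
# training data before the Bayesian optimization loop below.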
# In[ ]:
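# Main EHVI loop: each iteration refits one GP per objective, maximizes
# Expected Hypervolume Improvement over the continuous box bounds, snaps the
# optimum onto the parameter grids, and runs a new OpenFOAM case. After 30
# queries, the loop stops early if the hypervolume has been flat for five.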


num_queries = 50
num_repeats = 1
hypervolumes = torch.zeros((num_repeats, num_queries))
# Reference point for the hypervolume: the componentwise worst observed values
ref_point = torch.tensor([train_y[:, 0].min(), train_y[:, 1].min()])
for trial in range(num_repeats):
    for j in range(num_queries):
        print("iteration", j)
        dominated_part = DominatedPartitioning(ref_point, train_y)
        hypervolumes[trial, j] = dominated_part.compute_hypervolume().item()
        model1, likelihood1 = fit_gp_model(train_x, train_y[:, 0])
        model2, likelihood2 = fit_gp_model(train_x, train_y[:, 1])
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            policy = ExpectedHypervolumeImprovement(
                model=ModelListGP(model1, model2),
                ref_point=ref_point,
                partitioning=FastNondominatedPartitioning(ref_point, train_y)
            )
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', category=RuntimeWarning)
            next_x, acq_val = botorch.optim.optimize_acqf(
                policy,
                # Per-parameter box bounds matching the discrete grids above
                bounds=torch.tensor([[0.0, 0.0, 0.0, 50.0, 50.0, 1.0, 1.0, 1.0, 5.0],
                                     [70.0, 90.0, 90.0, 200.0, 200.0, 10.0, 10.0, 10.0, 15.0]]),
                q=1,
                num_restarts=20,
                raw_samples=50
            )
        # Stop early once the hypervolume has been flat over five queries
        if j >= 30:
            last_five_hypervolumes = hypervolumes[0, j-5:j]
            if all(x == last_five_hypervolumes[0] for x in last_five_hypervolumes):
                print(f"Terminating loop early at j={j}")
                break
        next_x_adapted = adapt_next_x(next_x)
        train_x = torch.cat([train_x, next_x_adapted])
        next_y = evaluate_function(next_x_adapted, f"_{j}")
        train_y = torch.cat([train_y, next_y])
print(hypervolumes)
#plot_hypervolume_k_DeltaP(j)
print(hypervolumes.max())
# Assumption: report the most recent point in the best hypervolume set;
# argmax indexes queries, so offset by the initial samples
print(train_x[num_samples + hypervolumes.argmax() - 1])
print(train_y[num_samples + hypervolumes.argmax() - 1])
torch.save(train_y, "Results_train_y_TwoObjectiveFunctions_WithJosipDate.pt")
torch.save(train_x, "Results_train_x_TwoObjectiveFunctions_WithJosipDate.pt")
torch.save(hypervolumes, "Results_hypervolumes_TwoObjectiveFunctions_WithJosipDate.pt")
