Commit 44c899e

CI
1 parent 85792f4 commit 44c899e

4 files changed: +13 -41 lines changed

libmoon/auto_tester/test_synthetic.py

Lines changed: 0 additions & 9 deletions
@@ -21,9 +21,7 @@
 import argparse
 import numpy as np
 
-
 if __name__ == '__main__':
-
     parser = argparse.ArgumentParser()
     parser.add_argument('--n-epoch', type=int, default=10000)
     parser.add_argument('--step-size', type=float, default=1e-3)
@@ -44,7 +42,6 @@
 
     solver = solver_dict[parser.parse_args().solver_name]
     args = parser.parse_args()
-
     args.method_name = args.solver_name if args.solver_name != 'GradAgg' \
         else '{}_{}'.format(args.solver_name, args.agg_name)
     problem = VLMOP1(n_var=10)
@@ -66,21 +63,17 @@
     ts = time() - ts
     y = res['y']
     fig = plt.figure()
-
     plt.scatter(y[:, 0], y[:, 1], s=100)
     rho_arr = np.linalg.norm(y, axis=1)
     plt.xlabel('$f_1$', fontsize=20)
     plt.ylabel('$f_2$', fontsize=20)
-
     plt.xlim([0,1])
     plt.ylim([0,1])
     plt.gca().set_aspect('equal', adjustable="box")
-
     plt.xticks(fontsize=18)
     plt.yticks(fontsize=18)
     os.makedirs(folder_name, exist_ok=True)
     prefs = prefs.numpy()
-
     prefs_norm = prefs / np.linalg.norm(prefs, axis=1, keepdims=True)
     rho_max = 1.2
     for pref in prefs_norm:
@@ -97,8 +90,6 @@
     plt.xlabel('Epoch', fontsize=20)
     plt.ylabel('Hypervolume', fontsize=20)
 
-
-
     plt.xticks(fontsize=15)
     plt.yticks(fontsize=15)
     plt.tight_layout()
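
The changes to this file are whitespace-only; the argument parsing and the Pareto-front/hypervolume plotting are untouched. For context, the naming step kept here tags GradAgg runs with the aggregation function. A minimal standalone sketch of that step (the --solver-name and --agg-name flags match the companion shell script below; the defaults are assumptions, not the script's actual values):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--n-epoch', type=int, default=10000)
    parser.add_argument('--step-size', type=float, default=1e-3)
    parser.add_argument('--solver-name', type=str, default='GradAgg')  # assumed default
    parser.add_argument('--agg-name', type=str, default='STche')       # assumed default
    args = parser.parse_args()

    # GradAgg runs get tagged with the aggregation name, e.g. 'GradAgg_STche';
    # any other solver keeps its plain name.
    args.method_name = args.solver_name if args.solver_name != 'GradAgg' \
        else '{}_{}'.format(args.solver_name, args.agg_name)
    print(args.method_name)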

libmoon/auto_tester/test_synthetic.sh

Lines changed: 4 additions & 0 deletions
@@ -1,5 +1,9 @@
 nepoch=1000
 
+
+# Online using libmoon/auto_tester/X
+# pycharm using auto_tester/X
+
 for agg in STche
 do
     python libmoon/auto_tester/test_synthetic.py --solver-name GradAgg --agg-name $agg --n-epoch $nepoch
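
The two comments added above record that the relative path to test_synthetic.py depends on where the loop is launched: libmoon/auto_tester/... from the repository root ("Online"), auto_tester/... when PyCharm's working directory is the libmoon/ package. A hypothetical Python launcher that avoids the working-directory dependence resolves the script from the libmoon package itself (the script name and flags are taken from this commit; the launcher is an assumption, not part of the repository):

    import os
    import subprocess

    import libmoon

    # Hypothetical launcher: locate test_synthetic.py relative to the libmoon
    # package, so the call works from any working directory.
    script = os.path.join(os.path.dirname(libmoon.__file__),
                          'auto_tester', 'test_synthetic.py')
    subprocess.run(['python', script,
                    '--solver-name', 'GradAgg',
                    '--agg-name', 'STche',
                    '--n-epoch', '1000'],
                   check=True)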

libmoon/solver/gradient/methods/base_solver.py

Lines changed: 4 additions & 30 deletions
@@ -93,14 +93,16 @@ def solve(self, problem, x, prefs):
         if self.solver_name == 'UMOD':
             self.pfl_model = PFLModel(n_obj=problem.n_obj)
             self.pfl_optimizer = torch.optim.Adam(self.pfl_model.parameters(), lr=1e-3)
+
         self.n_prob, self.n_obj = prefs.shape[0], prefs.shape[1]
         xs_var = Variable(x, requires_grad=True)
         optimizer = Adam([xs_var], lr=self.step_size)
         ind = HV( ref_point=get_hv_ref(problem.problem_name) )
         hv_arr, y_arr = [], []
         # For UMOD solver, we need to store (pref, y) pairs.
         pref_y_pairs = []
-        for epoch_idx in tqdm(range(self.epoch)):
+
+        for epoch_idx in range(self.epoch):
             fs_var = problem.evaluate(xs_var)
             y_np = fs_var.detach().numpy()
             y_arr.append(y_np)
@@ -195,32 +197,4 @@ def __init__(self, problem, prefs, step_size=1e-3, n_epoch=500, tol=1e-3,
         super().__init__(step_size, n_epoch, tol, core_solver=self.core_solver)
 
     def solve(self, x_init):
-        return super().solve(self.problem, x_init, self.prefs)
-        # x = x_init
-        # x = Variable(x, requires_grad=True)
-        # ind = HV(ref_point=get_hv_ref(self.problem.problem_name))
-        # hv_arr = []
-        # y_arr, x_arr = [], []
-        # prefs = Tensor(self.prefs)
-        # optimizer = SGD([x], lr=self.step_size)
-        # agg_func = get_agg_func(self.agg)
-        # res = {}
-        #
-        # for epoch_idx in tqdm(range(self.n_epoch)):
-        #     y = self.problem.evaluate(x)
-        #     hv_arr.append(ind.do(y.detach().numpy()))
-        #     agg_val = agg_func(y, prefs)
-        #     optimizer.zero_grad()
-        #     torch.sum(agg_val).backward()
-        #     optimizer.step()
-        #     y_arr.append(y.detach().numpy())
-        #
-        #     if 'lbound' in dir(self.problem):
-        #         x.data = torch.clamp(x.data, torch.Tensor(self.problem.lbound) + solution_eps,
-        #                              torch.Tensor(self.problem.ubound) - solution_eps)
-        #     res['x'] = x.detach().numpy()
-        #     res['y'] = y.detach().numpy()
-        #     res['hv_history'] = np.array(hv_arr)
-        #     res['y_history'] = np.array(y_arr)
-        #     res['x_history'] = np.array(y_arr)
-        #     return res
+        return super().solve(self.problem, x_init, self.prefs)
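
The large deletion above drops a commented-out copy of the scalarization loop; the subclass's solve now only delegates to GradBaseSolver.solve via super(). As a standalone illustration of the update rule that was sitting in those comments, one epoch of aggregation-based gradient descent looks roughly like this (problem_evaluate and agg_func are stand-ins for libmoon's problem.evaluate and get_agg_func(...), so treat it as a sketch rather than the library's API):

    import torch
    from torch.optim import SGD

    def aggregation_epoch(x, prefs, problem_evaluate, agg_func, step_size=1e-3):
        """One scalarized gradient step over a batch of preference vectors."""
        x = torch.nn.Parameter(x.clone())      # decision variables, shape (n_prob, n_var)
        optimizer = SGD([x], lr=step_size)
        y = problem_evaluate(x)                # objective values, shape (n_prob, n_obj)
        agg_val = agg_func(y, prefs)           # one scalarized loss per preference vector
        optimizer.zero_grad()
        torch.sum(agg_val).backward()          # sum over preferences, single backward pass
        optimizer.step()
        return x.detach(), y.detach()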

libmoon/solver/gradient/methods/pmgda_solver.py

Lines changed: 5 additions & 2 deletions
@@ -1,4 +1,7 @@
 from libmoon.solver.gradient.methods.base_solver import GradBaseSolver
+
+from libmoon.util.constant import root_name
+
 from torch.optim import SGD
 from tqdm import tqdm
 from pymoo.indicators.hv import HV
@@ -28,7 +31,8 @@ def __init__(self, n_var, prefs):
         self.n_prob, self.n_obj = prefs.shape[0], prefs.shape[1]
         self.n_var = n_var
         prefs_np = prefs.cpu().numpy() if type(prefs) == torch.Tensor else prefs
-        self.config_name = 'D:\\pycharm_project\\libmoon\\libmoon\\config\\pmgda.json'
+        self.config_name = os.path.join(root_name, 'config', 'pmgda.json')
+
         json_file = open(self.config_name, 'r')
         self.config = json.load(json_file)
         self.h_eps = self.config['h_eps']
@@ -217,7 +221,6 @@ def get_Jhf(f_arr, pref, return_h=False):
 
 class PMGDASolver(GradBaseSolver):
     # The PGMDA paper: http://arxiv.org/abs/2402.09492.
-
     def __init__(self, problem, prefs, step_size=1e-3, n_epoch=500, tol=1e-3,
                  sigma=0.1, h_tol=1e-3, folder_name=None):
         self.folder_name=folder_name
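
Swapping the hard-coded Windows path for os.path.join(root_name, 'config', 'pmgda.json'), with root_name imported from libmoon.util.constant, makes the pmgda.json lookup machine- and OS-independent. (The new line relies on the os module, which is presumably imported elsewhere in pmgda_solver.py; that import is not visible in this diff.) The commit does not show how root_name is defined; a common way to define such a package-root constant, offered purely as an assumption and not as libmoon's actual code, is:

    import os

    # Hypothetical definition, in the spirit of libmoon.util.constant.root_name;
    # the real definition is not shown in this commit.  If constant.py lives at
    # <repo>/libmoon/util/constant.py, two dirname() calls walk up to the libmoon
    # package directory that contains config/pmgda.json.
    root_name = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

    config_name = os.path.join(root_name, 'config', 'pmgda.json')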
