
Commit 9316237

Tanguy Damart committed: Changed hv computation to Monte-Carlo HypE algorithm
1 parent: a1b5c1e

File tree: 3 files changed (+167, -75 lines)

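This commit replaces the exact per-individual hypervolume contribution in CMA_MO.py (previously computed with deap.tools._hypervolume inside a pebble subprocess) with the sampled HypE indicator added in bluepyopt/deapext/hype.py. As background, the sketch below illustrates the Monte-Carlo idea that indicator relies on: estimate the volume dominated by a front with respect to a reference point by uniform sampling. The front, reference point and sample count are invented for the example and are not taken from the repository.

import numpy

# Toy 2-objective front (minimisation) and reference point, used only to
# illustrate Monte-Carlo hypervolume estimation; not taken from the diff.
front = numpy.array([[1., 3.], [2., 2.], [3., 1.]])
ref = numpy.array([4., 4.])
lower = numpy.min(front, axis=0)

n_samples = 200000
samples = numpy.random.uniform(low=lower, high=ref,
                               size=(n_samples, front.shape[1]))

# A sample is dominated if at least one front point is <= it in every objective.
dominated = numpy.zeros(n_samples, dtype=bool)
for point in front:
    dominated |= numpy.all(samples >= point, axis=1)

# Fraction of dominated samples times the volume of the sampled box.
hv_estimate = numpy.mean(dominated) * numpy.prod(ref - lower)
print(hv_estimate)  # approaches the exact hypervolume as n_samples grows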

bluepyopt/deapext/CMA_MO.py

Lines changed: 79 additions & 75 deletions
@@ -32,34 +32,36 @@
 
 from .stoppingCriteria import MaxNGen
 from . import utils
+from . import hype
 
 logger = logging.getLogger('__main__')
 
-from deap.tools._hypervolume import hv as hv_c
 
+def get_hyped(pop):
+    # Cap the obj at 250
+    points = numpy.array([ind.fitness.values for ind in pop])
+    points[points > 250.] = 250.
+    lbounds = numpy.min(points, axis=0)
+    ubounds = numpy.max(points, axis=0)
 
-def get_hv(to_evaluate):
-    i = to_evaluate[0]
-    wobj = to_evaluate[1]
-    ref = to_evaluate[2]
-    return hv_c.hypervolume(numpy.concatenate((wobj[:i], wobj[i + 1:])), ref)
+    # Remove the dimensions that do not show any improvement
+    to_remove = []
+    for i, (lb, ub) in enumerate(zip(lbounds, ubounds)):
+        if lb >= 240:
+            to_remove.append(i)
+    points = numpy.delete(points, to_remove, axis=1)
+    lbounds = numpy.delete(lbounds, to_remove)
+    ubounds = numpy.delete(ubounds, to_remove)
 
+    # Rescale the objective space
+    points = (points - lbounds) / numpy.max(ubounds.flatten())
+    ubounds = numpy.max(points, axis=0) + 2.
 
-def contribution(to_evaluate):
-    def _reduce_method(meth):
-        """Overwrite reduce"""
-        return (getattr, (meth.__self__, meth.__func__.__name__))
-
-    import copyreg
-    import types
-    copyreg.pickle(types.MethodType, _reduce_method)
-    import pebble
-
-    with pebble.ProcessPool(max_tasks=1) as pool:
-        tasks = pool.schedule(get_hv, kwargs={'to_evaluate': to_evaluate})
-        response = tasks.result()
-
-    return response
+    hv = hype.hypeIndicatorSampled(points=points,
+                                   bounds=ubounds,
+                                   k=5,
+                                   nrOfSamples=200000)
+    return hv
 
 
 class CMA_MO(cma.StrategyMultiObjective):
@@ -72,6 +74,7 @@ def __init__(self,
                  max_ngen,
                  IndCreator,
                  RandIndCreator,
+                 weight_hv=0.5,
                  map_function=None,
                  use_scoop=False):
         """Constructor
@@ -105,6 +108,8 @@ def __init__(self,
 
         self.population = []
         self.problem_size = len(starters[0])
+
+        self.weight_hv = weight_hv
 
         self.map_function = map_function
         self.use_scoop = use_scoop
@@ -130,31 +135,6 @@ def __init__(self,
 
         self.stopping_conditions = [MaxNGen(max_ngen)]
 
-    def hyper_volume(self, front):
-        """Compute the hypervolume contribution of each individual"""
-        wobj = numpy.array([ind.fitness.values for ind in front])
-        obj_ranges = (numpy.max(wobj, axis=0) - numpy.min(wobj, axis=0))
-        ref = numpy.max(wobj, axis=0) + 1
-
-        # Above 23 dimension, the hypervolume computation is too slow,
-        # we settle for the 23 dimension showing the largest range of values
-        max_ndim = 23
-        if len(ref) > max_ndim:
-            idxs = list(range(len(ref)))
-            idxs = [idxs[k] for k in numpy.argsort(obj_ranges)]
-            idxs = idxs[::-1]
-            idxs = idxs[:max_ndim]
-            wobj = wobj[:, idxs]
-            ref = ref[idxs]
-
-        # Prepare the data and send it to multiprocess
-        to_evaluate = []
-        for i in range(len(front)):
-            to_evaluate.append([i, numpy.copy(wobj), numpy.copy(ref)])
-        contrib_values = self.map_function(contribution, to_evaluate)
-
-        return list(contrib_values)
-
     def _select(self, candidates):
         """Select the best candidates of the population
 
@@ -163,36 +143,60 @@ def _select(self, candidates):
         we rely on the hypervolume for this front. The remaining fronts are
         explicitly not chosen"""
 
-        if len(candidates) <= self.mu:
-            return candidates, []
-
-        pareto_fronts = deap.tools.sortLogNondominated(candidates,
-                                                       len(candidates))
-
-        chosen = list()
-        mid_front = None
-        not_chosen = list()
-
-        full = False
-        for front in pareto_fronts:
-            if len(chosen) + len(front) <= self.mu and not full:
-                chosen += front
-            elif mid_front is None and len(chosen) < self.mu:
-                mid_front = front
-                # With this front, we selected enough individuals
-                full = True
-            else:
-                not_chosen += front
-
-        # Hypervolume contribution to get the best candidates on the remaining
-        # front
-        k = self.mu - len(chosen)
-        if k > 0:
-            hyperv = self.hyper_volume(mid_front)
-            _ = [mid_front[k] for k in numpy.argsort(hyperv)]
-            chosen += _[:k]
-            not_chosen += _[k:]
+        #if len(candidates) <= self.mu:
+        #    return candidates, []
+
+        #pareto_fronts = deap.tools.sortLogNondominated(candidates,
+        #                                               len(candidates))
+
+        #chosen = list()
+        #mid_front = None
+        #not_chosen = list()
+
+        #full = False
+        #for front in pareto_fronts:
+        #    if len(chosen) + len(front) <= self.mu and not full:
+        #        chosen += front
+        #    elif mid_front is None and len(chosen) < self.mu:
+        #        mid_front = front
+        #        # With this front, we selected enough individuals
+        #        full = True
+        #    else:
+        #        not_chosen += front
+
+        # HypE contribution to get the best candidates on the remaining front
+        #k = self.mu - len(chosen)
+        #if k > 0:
+        #    contribution = get_hyped(mid_front)
+        #    print(contribution)
+        #    idxs = numpy.argsort(contribution)
+        #    ordered_front = [mid_front[i] for i in idxs[::-1]]
+        #    chosen += ordered_front[:k]
+        #    not_chosen += ordered_front[k:]
+
+        if self.weight_hv == 0.:
+            fit = [numpy.sum(ind.fitness.values) for ind in candidates]
+            idx_fit = list(numpy.argsort(fit))
+            idx_scores = idx_fit[:]
+
+        elif self.weight_hv == 1.:
+            hv = get_hyped(candidates)
+            idx_hv = list(numpy.argsort(hv))[::-1]
+            idx_scores = idx_hv[:]
 
+        else:
+            hv = get_hyped(candidates)
+            idx_hv = list(numpy.argsort(hv))[::-1]
+            fit = [numpy.sum(ind.fitness.values) for ind in candidates]
+            idx_fit = list(numpy.argsort(fit))
+            scores = []
+            for i in range(len(candidates)):
+                score = (self.weight_hv * idx_hv.index(i)) + ((1.-self.weight_hv) * idx_fit.index(i))
+                scores.append(score)
+            idx_scores = list(numpy.argsort(scores))
+
+        chosen = [candidates[i] for i in idx_scores[:self.mu]]
+        not_chosen = [candidates[i] for i in idx_scores[self.mu:]]
         return chosen, not_chosen
 
     def get_population(self, to_space):
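
In the rewritten _select above, the Pareto-front bookkeeping is commented out: the whole candidate pool is ranked once by the sampled HypE score returned by get_hyped (higher is better) and once by the summed objective values (lower is better), and the two rank positions are blended with weight_hv. A small self-contained sketch of that blending, using invented scores in place of DEAP individuals, follows.

import numpy

# Stand-ins for the per-candidate quantities used in _select: `hv` would come
# from get_hyped(candidates) and `fit` from summing each individual's
# fitness.values. The numbers below are invented for illustration.
hv = numpy.array([0.10, 0.40, 0.05, 0.25])   # higher is better
fit = numpy.array([12.0, 15.0, 9.0, 20.0])   # lower is better
weight_hv = 0.5
mu = 2

idx_hv = list(numpy.argsort(hv))[::-1]   # best hypervolume contributors first
idx_fit = list(numpy.argsort(fit))       # lowest summed objectives first

# Blend the two rank positions; a low combined score means a good candidate.
scores = [weight_hv * idx_hv.index(i) + (1. - weight_hv) * idx_fit.index(i)
          for i in range(len(hv))]
idx_scores = list(numpy.argsort(scores))

chosen = idx_scores[:mu]       # indices kept as parents
not_chosen = idx_scores[mu:]   # indices discarded
print(chosen, not_chosen)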

bluepyopt/deapext/hype.py

Lines changed: 85 additions & 0 deletions
@@ -0,0 +1,85 @@
+import numpy
+
+def hypesub(l, A, actDim, bounds, pvec, alpha, k):
+
+    h = numpy.zeros(l)
+    i = numpy.argsort(A[:, actDim - 1])
+    S = A[i]
+    pvec = pvec[i]
+
+    for i in range(1, S.shape[0] + 1):
+
+        if i < S.shape[0]:
+            extrusion = S[i, actDim - 1] - S[i - 1, actDim - 1]
+        else:
+            extrusion = bounds[actDim - 1] - S[i - 1, actDim - 1]
+
+        if actDim == 1:
+            if i > k:
+                break
+            if alpha[i - 1] >= 0:
+                h[pvec[0:i]] += extrusion * alpha[i - 1]
+        elif extrusion > 0.:
+            h += extrusion * hypesub(l, S[0:i, :], actDim - 1, bounds,
+                                     pvec[0:i], alpha, k)
+
+    return h
+
+
+def hypeIndicatorExact(points, bounds, k):
+    """
+    points: objectives (to be minimized),
+    bounds: reference point,
+    k: parameter of HypE
+
+    Example: scores = hypeIndicatorExact([[1., 3.], [3., 1.]], [4., 4.], 1)
+    """
+
+    Ps = points.shape[0]
+    if k < 0:
+        k = Ps
+    actDim = points.shape[1]
+    pvec = numpy.arange(points.shape[0])
+
+    alpha = []
+    for i in range(1, k + 1):
+        j = numpy.arange(1, i)
+        alpha.append(numpy.prod((k - j) / (Ps - j) / i))
+    alpha = numpy.asarray(alpha)
+
+    return hypesub(points.shape[0], points, actDim, bounds, pvec, alpha, k)
+
+
+def hypeIndicatorSampled(points, bounds, k, nrOfSamples):
+
+    nrP = points.shape[0]
+    dim = points.shape[1]
+    F = numpy.zeros(nrP)
+
+    BoxL = numpy.min(points, axis=0)
+
+    alpha = []
+    for i in range(1, k + 1):
+        j = numpy.arange(1, i)
+        alpha.append(numpy.prod((k - j) / (nrP - j) / i))
+    alpha = numpy.asarray(alpha + [0.]*nrP)
+
+    S = numpy.random.uniform(low=BoxL, high=bounds, size=(nrOfSamples, dim))
+
+
+    dominated = numpy.zeros(nrOfSamples, dtype="uint")
+    for j in range(1, nrP+1):
+        B = S - points[j-1]
+        ind = numpy.sum(B >= 0, axis=1) == dim
+        dominated[ind] += 1
+
+    for j in range(1, nrP+1):
+        B = S - points[j-1]
+        ind = numpy.sum(B >= 0, axis=1) == dim
+        x = dominated[ind]
+        F[j-1] = numpy.sum(alpha[x-1])
+
+    F = F * numpy.prod(bounds - BoxL) / nrOfSamples
+
+    return F
+
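
The new module provides an exact HypE indicator (hypeIndicatorExact, recursing over objectives) and the Monte-Carlo variant used by CMA_MO (hypeIndicatorSampled). A hedged usage sketch follows; both functions index points.shape, so numpy arrays are passed instead of the plain lists shown in the docstring example, and the values themselves are invented.

import numpy
from bluepyopt.deapext import hype

# Illustrative 2-objective front (minimisation) and reference point.
points = numpy.array([[1., 3.], [2., 2.], [3., 1.]])
bounds = numpy.array([4., 4.])

exact = hype.hypeIndicatorExact(points, bounds, k=1)
sampled = hype.hypeIndicatorSampled(points, bounds, k=1, nrOfSamples=200000)

print(exact)    # per-point HypE fitness, computed exactly
print(sampled)  # Monte-Carlo estimate of the same values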

bluepyopt/deapext/optimisationsCMA.py

Lines changed: 3 additions & 0 deletions
@@ -54,6 +54,7 @@ def __init__(self,
                  map_function=None,
                  hof=None,
                  selector_name="single_objective",
+                 weight_hv=0.5,
                  fitness_reduce=numpy.sum):
         """Constructor
 
@@ -89,6 +90,7 @@ def __init__(self,
         self.offspring_size = offspring_size
         self.centroids = centroids
         self.sigma = sigma
+        self.weight_hv = weight_hv
 
         self.selector_name = selector_name
         if self.selector_name == 'single_objective':
@@ -245,6 +247,7 @@ def run(self,
                         use_scoop=self.use_scoop)
 
         if self.selector_name == 'multi_objective':
+            CMA_es.weight_hv = self.weight_hv
             to_evaluate = CMA_es.get_parents(self.to_space)
             fitness = self.toolbox.map(self.toolbox.evaluate, to_evaluate)
             fitness = list(map(list, fitness))
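
The weight_hv argument added here is stored on the optimiser and, when the multi-objective selector is used, copied onto the CMA strategy (CMA_es.weight_hv) before the parents are evaluated. A hedged usage sketch follows; the class name DEAPOptimisationCMA, the evaluator placeholder and the run() arguments are assumptions based on this module, not shown in the diff.

from bluepyopt.deapext.optimisationsCMA import DEAPOptimisationCMA  # assumed class name

# `cell_evaluator` is a placeholder for a bluepyopt evaluator and is not part
# of this diff. weight_hv=1.0 ranks candidates purely by the sampled HypE
# indicator, 0.0 purely by summed fitness; intermediate values blend both ranks.
optimisation = DEAPOptimisationCMA(evaluator=cell_evaluator,
                                   selector_name="multi_objective",
                                   weight_hv=0.8)
optimisation.run(max_ngen=100)  # assumed signature; see the run() hunk above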
