import numpy as np # engine for numerical computing
from pypop7.optimizers.core.optimizer import Optimizer # abstract class for all black-box optimizers (BBO)
from pypop7.optimizers.sa.sa import SA # abstract class for all simulated annealing (SA) subclasses


class ESA(SA):
"""Enhanced Simulated Annealing (ESA).
.. note:: `ESA` adopts a **random decomposition** strategy to alleviate the *curse of dimensionality* for
large-scale black-box optimization. Note that it shares some similaries (i.e., axis-parallel decomposition)
to the *Cooperative Coevolution* framework, which uses population-based sampling (rather than individual-based
sampling of `ESA`) for each subproblem (corresponding to a search subspace).
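
    For illustration only, this random decomposition can be sketched as below (a minimal sketch, where the
    problem dimension and the subspace dimension `p` take hypothetical values):

    .. code-block:: python
       :linenos:

       >>> import numpy as np
       >>> ndim_problem, p = 7, 3  # hypothetical problem and subspace dimensions
       >>> rng = np.random.default_rng(2022)
       >>> perm = rng.permutation(ndim_problem)  # random axis-parallel decomposition
       >>> # split the permuted coordinates into consecutive subspaces of (at most) p axes
       >>> subspaces = [perm[i:i + p] for i in range(0, ndim_problem, p)]  # sizes: 3 + 3 + 1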

    Parameters
    ----------
    problem : dict
              problem arguments with the following common settings (`keys`):
                * 'fitness_function' - objective function to be **minimized** (`func`),
                * 'ndim_problem' - number of dimensions (`int`),
                * 'upper_boundary' - upper boundary of search range (`array_like`),
                * 'lower_boundary' - lower boundary of search range (`array_like`).
    options : dict
              optimizer options with the following common settings (`keys`):
                * 'max_function_evaluations' - maximum number of function evaluations (`int`, default: `np.inf`),
                * 'max_runtime' - maximal runtime to be allowed (`float`, default: `np.inf`),
                * 'seed_rng' - seed for random number generation, which needs to be *explicitly* set (`int`);
              and with the following particular settings (`keys`):
                * 'p' - subspace dimension (`int`, default: `int(np.ceil(problem['ndim_problem']/3))`),
                * 'n1' - factor to control temperature stage w.r.t. accepted moves (`int`, default: `12`),
                * 'n2' - factor to control temperature stage w.r.t. attempted moves (`int`, default: `100`).

    Examples
    --------
    Use the black-box optimizer `ESA` to minimize the well-known test function
    `Rosenbrock <http://en.wikipedia.org/wiki/Rosenbrock_function>`_:

    .. code-block:: python
       :linenos:

       >>> import numpy  # engine for numerical computing
       >>> from pypop7.benchmarks.base_functions import rosenbrock  # function to be minimized
       >>> from pypop7.optimizers.sa.esa import ESA
       >>> problem = {'fitness_function': rosenbrock,  # define problem arguments
       ...            'ndim_problem': 2,
       ...            'lower_boundary': -5*numpy.ones((2,)),
       ...            'upper_boundary': 5*numpy.ones((2,))}
       >>> options = {'max_function_evaluations': 5000,  # set optimizer options
       ...            'seed_rng': 2022,
       ...            'x': 3*numpy.ones((2,))}
       >>> esa = ESA(problem, options)  # initialize the optimizer class
       >>> results = esa.optimize()  # run the optimization process
       >>> # return the number of function evaluations and best-so-far fitness
       >>> print(f"ESA: {results['n_function_evaluations']}, {results['best_so_far_y']}")
       ESA: 5000, 6.481109148014023

    To check the correctness of its coding, please refer to `this code-based repeatability report
    <https://tinyurl.com/3e2k39z2>`_ for more details.

    For its `pytest <https://docs.pytest.org/>`_-based testing, please refer to `this Python code
    </~https://github.com/Evolutionary-Intelligence/pypop/blob/main/pypop7/optimizers/sa/test_esa.py>`_.

    Attributes
    ----------
    n1 : `int`
         factor to control temperature stage w.r.t. accepted moves.
    n2 : `int`
         factor to control temperature stage w.r.t. attempted moves.
    p : `int`
        subspace dimension.

    References
    ----------
    Siarry, P., Berthiau, G., Durdin, F. and Haussy, J., 1997.
    `Enhanced simulated annealing for globally minimizing functions of many-continuous variables.
    <https://dl.acm.org/doi/abs/10.1145/264029.264043>`_
    ACM Transactions on Mathematical Software, 23(2), pp.209-228.
    """

    def __init__(self, problem, options):
        SA.__init__(self, problem, options)
        self.n1 = options.get('n1', 12)  # factor to control temperature stage w.r.t. accepted moves
        assert self.n1 > 0
        self.n2 = options.get('n2', 100)  # factor to control temperature stage w.r.t. attempted moves
        assert self.n2 > 0
        self.p = options.get('p', int(np.ceil(self.ndim_problem/3)))  # subspace dimension
        assert self.p > 0
        self.verbose = options.get('verbose', 10)
        # set parameters at current temperature stage
        self._elowst = None  # lowest fitness found during current temperature stage
        self._avgyst = 0  # average fitness of current temperature stage (accumulated as a running sum)
        self._mvokst = 0  # number of accepted moves
        self._mokst = np.zeros((self.ndim_problem,))  # numbers of accepted moves for each dimension
        self._nmvst = 0  # number of attempted moves
        self._mtotst = np.zeros((self.ndim_problem,))  # numbers of attempted moves for each dimension
        self._v = None  # step vector

    def initialize(self, args=None):
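        # set the initial step vector to a quarter of the search range in each dimension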
        self._v = 0.25*(self.upper_boundary - self.lower_boundary)
        if self.x is None:  # starting point
            x = self.rng_initialization.uniform(self.initial_lower_boundary, self.initial_upper_boundary)
        else:
            x = np.copy(self.x)
        assert len(x) == self.ndim_problem
        y = self._evaluate_fitness(x, args)
        self.parent_x, self.parent_y = np.copy(x), np.copy(y)
        fitness = [y]
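        # if no initial temperature is given, estimate one from the fitness of up to
        # 50 samples (the starting point plus 49 uniformly random points)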
        if self.temperature is None:
            ys = [y]  # all fitness samples used for the temperature estimate
            for _ in range(49):
                if self._check_terminations():
                    break
                xx = self.rng_initialization.uniform(self.lower_boundary, self.upper_boundary)
                yy = self._evaluate_fitness(xx, args)
                ys.append(yy)
                if self.saving_fitness:
                    fitness.append(yy)
            # choose the temperature such that a move of average cost would be accepted
            # with probability 0.5: exp(-mean(ys)/T) = 0.5  =>  T = -mean(ys)/ln(0.5)
            self.temperature = -np.mean(ys)/np.log(0.5)
        return fitness

    def iterate(self, p=None, args=None):
        fitness = []
        for k in p:  # iterate over the coordinates of one subspace, without over-selecting any dimension
            if self._check_terminations():
                return fitness
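            # perturb coordinate k by one step in a random direction; if the trial point
            # leaves the search box, try the opposite direction instead, then clip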
            x, sign = np.copy(self.parent_x), self.rng_optimization.choice([-1, 1])
            xx = x[k] + sign*self._v[k]
            if (xx < self.lower_boundary[k]) or (xx > self.upper_boundary[k]):
                xx = x[k] - sign*self._v[k]
            x[k] = np.maximum(np.minimum(xx, self.upper_boundary[k]), self.lower_boundary[k])
            y = self._evaluate_fitness(x, args)
            if self.saving_fitness:
                fitness.append(y)
            self._avgyst += y
            self._mtotst[k] += 1
            self._nmvst += 1
            diff = self.parent_y - y
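            # Metropolis criterion: always accept an improving move (diff >= 0) and
            # accept a worsening one with probability exp(diff/temperature)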
            if (diff >= 0) or (self.rng_optimization.random() < np.exp(diff/self.temperature)):
                self.parent_x, self.parent_y = np.copy(x), np.copy(y)
                self._mokst[k] += 1
                self._mvokst += 1
                if (diff >= 0) and (y < self._elowst):
                    self._elowst = y
        return fitness

    def _adjust_step_vector(self):
        for k in range(self.ndim_problem):
            if self._mtotst[k] > 0:
                rok = self._mokst[k]/self._mtotst[k]
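                # widen the step when the per-axis acceptance ratio is high (> 0.2) and
                # shrink it when low (< 0.05), balancing exploration against acceptance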
                if rok > 0.2:
                    self._v[k] *= 2.0
                elif rok < 0.05:
                    self._v[k] *= 0.5
                self._v[k] = np.minimum(self._v[k], self.upper_boundary[k] - self.lower_boundary[k])

    def _reset_parameters(self):
        self._mvokst = 0
        self._mokst = np.zeros((self.ndim_problem,))
        self._nmvst = 0
        self._mtotst = np.zeros((self.ndim_problem,))
        self._elowst = self.parent_y
        self._avgyst = 0

    def optimize(self, fitness_function=None, args=None):  # for all iterations (generations)
        fitness = Optimizer.optimize(self, fitness_function)
        y = self.initialize(args)
        self._elowst = y[0]
        self._print_verbose_info(fitness, y)
        while not self._check_terminations():
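            # random decomposition: draw a fresh permutation of all coordinates and sweep
            # it in consecutive chunks of size p (one subspace per call to iterate())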
            p, n_p = self.rng_optimization.permutation(self.ndim_problem), 0
            while (self._mvokst < self.n1*self.ndim_problem) and (self._nmvst < self.n2*self.ndim_problem):
                if self._check_terminations():
                    break
                n_p += 1
                new_fitness = self.iterate(p[(self.p*(n_p - 1)):(self.p*n_p)], args)
                self._n_generations += 1
                if len(new_fitness) > 0:
                    self._print_verbose_info(fitness, new_fitness)
                if self.p*n_p >= self.ndim_problem:  # to re-partition
                    p, n_p = self.rng_optimization.permutation(self.ndim_problem), 0
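            # end of a temperature stage: cool the temperature by the factor
            # elowst/avgyst (lowest over average stage fitness), clipped to [0.1, 0.9]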
            if self._nmvst > 0:  # guard against a stage aborted by early termination
                self._avgyst /= self._nmvst
                self.temperature *= np.maximum(np.minimum(self._elowst/self._avgyst, 0.9), 0.1)
            self._adjust_step_vector()
            self._reset_parameters()
        return self._collect(fitness)