
Commit 39a811d

Format optimizers

1 parent 30066f0 commit 39a811d

File tree: 2 files changed, +57 -73 lines changed


tensorflow_quantum/python/optimizers/rotosolve_minimizer.py

Lines changed: 17 additions & 25 deletions
@@ -12,8 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""The rotosolve minimization algorithm"""
-import collections
+"""The rotosolve minimization algorithm."""
 import numpy as np
 import tensorflow as tf
@@ -46,6 +45,7 @@ def prefer_static_value(x):
 
 
 class RotosolveOptimizerResults(tf.experimental.ExtensionType):
+    """ExtentionType of Rotosolve Optimizer tf.while_loop() inner state."""
     converged: tf.Tensor
     # Scalar boolean tensor indicating whether the minimum
     # was found within tolerance.
@@ -60,7 +60,7 @@ class RotosolveOptimizerResults(tf.experimental.ExtensionType):
     # this value is the argmin of the objective function.
     # A tensor containing the value of the objective from
     # previous iteration
-    objective_value_previous_iteration: tf.Tensor
+    objective_value_prev: tf.Tensor
     # Save the evaluated value of the objective function
     # from the previous iteration
     objective_value: tf.Tensor
@@ -78,23 +78,16 @@ class RotosolveOptimizerResults(tf.experimental.ExtensionType):
     # modifying. Reserved for internal use.
 
     def to_dict(self):
+        """Transforms immutable data to mutable dictionary."""
         return {
-            "converged":
-                self.converged,
-            "num_iterations":
-                self.num_iterations,
-            "num_objective_evaluations":
-                self.num_objective_evaluations,
-            "position":
-                self.position,
-            "objective_value":
-                self.objective_value,
-            "objective_value_previous_iteration":
-                self.objective_value_previous_iteration,
-            "tolerance":
-                self.tolerance,
-            "solve_param_i":
-                self.solve_param_i,
+            "converged": self.converged,
+            "num_iterations": self.num_iterations,
+            "num_objective_evaluations": self.num_objective_evaluations,
+            "position": self.position,
+            "objective_value": self.objective_value,
+            "objective_value_prev": self.objective_value_prev,
+            "tolerance": self.tolerance,
+            "solve_param_i": self.solve_param_i,
         }
@@ -106,7 +99,7 @@ def _get_initial_state(initial_position, tolerance, expectation_value_function):
         "num_objective_evaluations": tf.Variable(0),
         "position": tf.Variable(initial_position),
         "objective_value": expectation_value_function(initial_position),
-        "objective_value_previous_iteration": tf.Variable(0.),
+        "objective_value_prev": tf.Variable(0.),
         "tolerance": tolerance,
         "solve_param_i": tf.Variable(0),
     }
@@ -214,7 +207,7 @@ def _rotosolve_one_parameter_once(state):
         next_state_params.update({
             "solve_param_i": state.solve_param_i + 1,
             "position": new_position,
-            "objective_value_previous_iteration": state.objective_value,
+            "objective_value_prev": state.objective_value,
             "objective_value": (expectation_value_function(new_position)),
         })
         return [RotosolveOptimizerResults(**next_state_params)]
@@ -265,10 +258,9 @@ def _body(state):
         post_state = _rotosolve_all_parameters_once(pre_state)[0]
         next_state_params = post_state.to_dict()
         next_state_params.update({
-            "converged":
-                (tf.abs(post_state.objective_value -
-                        post_state.objective_value_previous_iteration) <
-                 post_state.tolerance),
+            "converged": (tf.abs(post_state.objective_value -
+                                 post_state.objective_value_prev) <
+                          post_state.tolerance),
             "num_iterations": post_state.num_iterations + 1,
         })
         return [RotosolveOptimizerResults(**next_state_params)]
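A note for readers skimming the diff: both optimizers keep their tf.while_loop state in an immutable tf.experimental.ExtensionType, so each iteration copies the state out with to_dict(), overwrites a few entries (now under the shorter objective_value_prev key), and rebuilds a fresh results object. Below is a minimal, self-contained sketch of that round-trip using a toy state class, not the actual RotosolveOptimizerResults:

import tensorflow as tf

class ToyState(tf.experimental.ExtensionType):
    """Toy immutable loop state mirroring the to_dict()/rebuild pattern."""
    objective_value: tf.Tensor
    objective_value_prev: tf.Tensor
    num_iterations: tf.Tensor

    def to_dict(self):
        # ExtensionType fields are immutable; updates go through a plain dict.
        return {
            "objective_value": self.objective_value,
            "objective_value_prev": self.objective_value_prev,
            "num_iterations": self.num_iterations,
        }

def _body(state):
    new_value = state.objective_value * 0.5  # stand-in for a real objective
    params = state.to_dict()
    params.update({
        "objective_value_prev": state.objective_value,
        "objective_value": new_value,
        "num_iterations": state.num_iterations + 1,
    })
    return [ToyState(**params)]

def _cond(state):
    return tf.abs(state.objective_value - state.objective_value_prev) > 1e-3

initial = ToyState(objective_value=tf.constant(1.0),
                   objective_value_prev=tf.constant(2.0),
                   num_iterations=tf.constant(0))
final = tf.while_loop(cond=_cond, body=_body, loop_vars=[initial])[0]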

tensorflow_quantum/python/optimizers/spsa_minimizer.py

Lines changed: 40 additions & 48 deletions
@@ -12,8 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""The SPSA minimization algorithm"""
-import collections
+"""The SPSA minimization algorithm."""
 import tensorflow as tf
 import numpy as np
@@ -46,6 +45,7 @@ def prefer_static_value(x):
 
 
 class SPSAOptimizerResults(tf.experimental.ExtensionType):
+    """ExtentionType of SPSA Optimizer tf.while_loop() inner state."""
    converged: tf.Tensor
     # Scalar boolean tensor indicating whether the minimum
     # was found within tolerance.
@@ -60,7 +60,7 @@ class SPSAOptimizerResults(tf.experimental.ExtensionType):
     # this value is the argmin of the objective function.
     # A tensor containing the value of the objective from
     # previous iteration
-    objective_value_previous_iteration: tf.Tensor
+    objective_value_prev: tf.Tensor
     # Save the evaluated value of the objective function
     # from the previous iteration
     objective_value: tf.Tensor
@@ -72,7 +72,7 @@ class SPSAOptimizerResults(tf.experimental.ExtensionType):
     # Define the stop criteria. Iteration will stop when the
     # objective value difference between two iterations is
     # smaller than tolerance
-    lr: tf.Tensor
+    learning_rate: tf.Tensor
     # Specifies the learning rate
     alpha: tf.Tensor
     # Specifies scaling of the learning rate
@@ -89,38 +89,27 @@ class SPSAOptimizerResults(tf.experimental.ExtensionType):
     # (only applies if blocking is true).
 
     def to_dict(self):
+        """Transforms immutable data to mutable dictionary."""
         return {
-            "converged":
-                self.converged,
-            "num_iterations":
-                self.num_iterations,
-            "num_objective_evaluations":
-                self.num_objective_evaluations,
-            "position":
-                self.position,
-            "objective_value":
-                self.objective_value,
-            "objective_value_previous_iteration":
-                self.objective_value_previous_iteration,
-            "tolerance":
-                self.tolerance,
-            "lr":
-                self.lr,
-            "alpha":
-                self.alpha,
-            "perturb":
-                self.perturb,
-            "gamma":
-                self.gamma,
-            "blocking":
-                self.blocking,
-            "allowed_increase":
-                self.allowed_increase,
+            "converged": self.converged,
+            "num_iterations": self.num_iterations,
+            "num_objective_evaluations": self.num_objective_evaluations,
+            "position": self.position,
+            "objective_value": self.objective_value,
+            "objective_value_prev": self.objective_value_prev,
+            "tolerance": self.tolerance,
+            "learning_rate": self.learning_rate,
+            "alpha": self.alpha,
+            "perturb": self.perturb,
+            "gamma": self.gamma,
+            "blocking": self.blocking,
+            "allowed_increase": self.allowed_increase,
         }
 
 
 def _get_initial_state(initial_position, tolerance, expectation_value_function,
-                       lr, alpha, perturb, gamma, blocking, allowed_increase):
+                       learning_rate, alpha, perturb, gamma, blocking,
+                       allowed_increase):
     """Create SPSAOptimizerResults with initial state of search."""
     init_args = {
         "converged": tf.Variable(False),
@@ -129,9 +118,9 @@ def _get_initial_state(initial_position, tolerance, expectation_value_function,
         "position": tf.Variable(initial_position),
         "objective_value":
             (tf.cast(expectation_value_function(initial_position), tf.float32)),
-        "objective_value_previous_iteration": tf.Variable(np.inf),
+        "objective_value_prev": tf.Variable(np.inf),
         "tolerance": tolerance,
-        "lr": tf.Variable(lr),
+        "learning_rate": tf.Variable(learning_rate),
         "alpha": tf.Variable(alpha),
         "perturb": tf.Variable(perturb),
         "gamma": tf.Variable(gamma),
@@ -146,7 +135,7 @@ def minimize(expectation_value_function,
              tolerance=1e-5,
              max_iterations=200,
              alpha=0.602,
-             lr=1.0,
+             learning_rate=1.0,
              perturb=1.0,
              gamma=0.101,
             blocking=False,
@@ -188,7 +177,8 @@ def minimize(expectation_value_function,
         tolerance: Scalar `tf.Tensor` of real dtype. Specifies the tolerance
             for the procedure. If the supremum norm between two iteration
             vector is below this number, the algorithm is stopped.
-        lr: Scalar `tf.Tensor` of real dtype. Specifies the learning rate
+        learning_rate: Scalar `tf.Tensor` of real dtype.
+            Specifies the learning rate.
         alpha: Scalar `tf.Tensor` of real dtype. Specifies scaling of the
             learning rate.
         perturb: Scalar `tf.Tensor` of real dtype. Specifies the size of the
@@ -227,7 +217,9 @@ def minimize(expectation_value_function,
     max_iterations = tf.convert_to_tensor(max_iterations,
                                           name='max_iterations')
 
-    lr_init = tf.convert_to_tensor(lr, name='initial_a', dtype='float32')
+    learning_rate_init = tf.convert_to_tensor(learning_rate,
+                                              name='initial_a',
+                                              dtype='float32')
     perturb_init = tf.convert_to_tensor(perturb,
                                         name='initial_c',
                                         dtype='float32')
@@ -253,7 +245,7 @@ def _spsa_once(state):
                                                  state.perturb * delta_shift)
 
         gradient_estimate = (v_p - v_m) / (2 * state.perturb) * delta_shift
-        update = state.lr * gradient_estimate
+        update = state.learning_rate * gradient_estimate
         next_state_params = state.to_dict()
         next_state_params.update({
             "num_objective_evaluations":
@@ -263,11 +255,11 @@ def _spsa_once(state):
         current_obj = tf.cast(expectation_value_function(state.position -
                                                          update),
                               dtype=tf.float32)
-        if state.objective_value_previous_iteration + \
+        if state.objective_value_prev + \
             state.allowed_increase >= current_obj or not state.blocking:
             next_state_params.update({
                 "position": state.position - update,
-                "objective_value_previous_iteration": state.objective_value,
+                "objective_value_prev": state.objective_value,
                 "objective_value": current_obj
             })
@@ -285,35 +277,35 @@ def _cond(state):
 
     def _body(state):
         """Main optimization loop."""
-        new_lr = lr_init / (
+        new_learning_rate = learning_rate_init / (
             (tf.cast(state.num_iterations + 1, tf.float32) +
              0.01 * tf.cast(max_iterations, tf.float32))**state.alpha)
         new_perturb = perturb_init / (tf.cast(state.num_iterations + 1,
                                               tf.float32)**state.gamma)
 
         pre_state_params = state.to_dict()
         pre_state_params.update({
-            "lr": new_lr,
+            "learning_rate": new_learning_rate,
             "perturb": new_perturb,
         })
 
         post_state = _spsa_once(SPSAOptimizerResults(**pre_state_params))[0]
         post_state_params = post_state.to_dict()
         tf.print("asdf", state.objective_value.dtype,
-                 state.objective_value_previous_iteration.dtype)
+                 state.objective_value_prev.dtype)
         post_state_params.update({
             "num_iterations":
                 post_state.num_iterations + 1,
-            "converged": (tf.abs(state.objective_value -
-                                 state.objective_value_previous_iteration) <
-                          state.tolerance),
+            "converged":
+                (tf.abs(state.objective_value - state.objective_value_prev)
+                 < state.tolerance),
         })
         return [SPSAOptimizerResults(**post_state_params)]
 
     initial_state = _get_initial_state(initial_position, tolerance,
-                                       expectation_value_function, lr,
-                                       alpha, perturb, gamma, blocking,
-                                       allowed_increase)
+                                       expectation_value_function,
+                                       learning_rate, alpha, perturb, gamma,
+                                       blocking, allowed_increase)
 
     return tf.while_loop(cond=_cond,
                          body=_body,
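With the rename in place, callers pass learning_rate instead of lr. A hypothetical usage sketch, assuming minimize is imported straight from the module shown in this diff and returns the final SPSAOptimizerResults state; the toy expectation function is mine, not part of the library:

import tensorflow as tf
from tensorflow_quantum.python.optimizers import spsa_minimizer

# Toy objective: a smooth bowl over two parameters.
def expectation(position):
    return tf.reduce_sum(tf.sin(position)**2)

result = spsa_minimizer.minimize(
    expectation_value_function=expectation,
    initial_position=tf.constant([0.3, -0.2]),
    learning_rate=1.0,  # formerly `lr`
    perturb=1.0,
    max_iterations=50)
# Assuming the final state object is returned, its fields are the ones
# defined on SPSAOptimizerResults above (converged, objective_value, ...).
print(result.converged, result.objective_value)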
