Skip to content

Commit ebf26d5

Browse files
colintaku-y
authored and committed
Remove verbose argument
1 parent c04da31 commit ebf26d5

File tree

7 files changed

+40
-67
lines changed

7 files changed

+40
-67
lines changed

docs/source/notebooks/NUTS_scaling_using_ADVI.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -490,7 +490,7 @@
490490
],
491491
"source": [
492492
"with mdl:\n",
493-
" v_params = pm.variational.advi(n=100000, verbose=False) \n",
493+
" v_params = pm.variational.advi(n=100000) \n",
494494
"\n",
495495
"_ = plt.plot(-np.log10(-v_params.elbo_vals))"
496496
]

pymc3/model.py

Lines changed: 5 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -169,25 +169,16 @@ def logpt(self):
169169

170170

171171
class Model(Context, Factor):
172-
"""Encapsulates the variables and likelihood factors of a model.
172+
"""Encapsulates the variables and likelihood factors of a model."""
173173

174-
Parameters
175-
----------
176-
verbose : int
177-
Model verbosity setting, determining how much feedback various
178-
operations provide. Normal verbosity is verbose=1 (default), silence
179-
is verbose=0, high is any value greater than 1.
180-
"""
181-
182-
def __init__(self, verbose=1):
174+
def __init__(self):
183175
self.named_vars = {}
184176
self.free_RVs = []
185177
self.observed_RVs = []
186178
self.deterministics = []
187179
self.potentials = []
188180
self.missing_values = []
189181
self.model = self
190-
self.verbose = verbose
191182

192183
@property
193184
@memoize
@@ -288,7 +279,7 @@ def Var(self, name, dist, data=None):
288279
var = TransformedRV(name=name, distribution=dist, model=self,
289280
transform=dist.transform)
290281
pm._log.debug('Applied {transform}-transform to {name}'
291-
' and added transformed {orig_name} to model.'.format(
282+
' and added transformed {orig_name} to model.'.format(
292283
transform=dist.transform.name,
293284
name=name,
294285
orig_name='{}_{}_'.format(name, dist.transform.name)))
@@ -623,8 +614,8 @@ def __init__(self, name, data, distribution, model):
623614
self.data = {name: as_tensor(data, name, model, distribution)
624615
for name, data in data.items()}
625616

626-
self.missing_values = [data.missing_values for data in self.data.values()
627-
if data.missing_values is not None]
617+
self.missing_values = [datum.missing_values for datum in self.data.values()
618+
if datum.missing_values is not None]
628619
self.logp_elemwiset = distribution.logp(**self.data)
629620
self.model = model
630621
self.distribution = distribution

pymc3/sampling.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -69,8 +69,7 @@ def assign_step_methods(model, step=None, methods=(NUTS, HamiltonianMC, Metropol
6969
for var in model.free_RVs:
7070
if var not in assigned_vars:
7171
selected = max(methods, key=lambda method: method._competence(var))
72-
if model.verbose:
73-
pm._log.info('Assigned {0} to {1}'.format(selected.__name__, var))
72+
pm._log.info('Assigned {0} to {1}'.format(selected.__name__, var))
7473
selected_steps[selected].append(var)
7574

7675
# Instantiate all selected step methods

pymc3/tests/test_advi.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,7 @@ def create_minibatches():
9595

9696
with self.assertRaises(ValueError):
9797
advi_minibatch(n=10, minibatch_RVs=[disasters], minibatch_tensors=[disaster_data_t],
98-
minibatches=create_minibatches(), verbose=False)
98+
minibatches=create_minibatches())
9999

100100
def test_advi(self):
101101
n = 1000
@@ -201,7 +201,7 @@ def create_minibatches(data):
201201
mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
202202
x = Normal('x', mu=mu_, sd=sd, observed=data_t)
203203
advi_fit = advi_minibatch(
204-
n=1000, minibatch_tensors=[data_t], encoder_params=[],
204+
n=1000, minibatch_tensors=[data_t], encoder_params=[],
205205
minibatch_RVs=[x], minibatches=create_minibatches(data),
206206
total_size=n, learning_rate=1e-1)
207207
np.testing.assert_allclose(advi_fit.means['mu'], mu_post, rtol=0.1)

pymc3/tuning/starting.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -55,9 +55,7 @@ def find_MAP(start=None, vars=None, fmin=None, return_raw=False,
5555

5656
disc_vars = list(typefilter(vars, discrete_types))
5757

58-
kwargs["disp"] = model.verbose > 1
59-
60-
if disc_vars and kwargs["disp"]:
58+
if disc_vars:
6159
pm._log.warning("Warning: vars contains discrete variables. MAP " +
6260
"estimates may not be accurate for the default " +
6361
"parameters. Defaulting to non-gradient minimization " +

pymc3/variational/advi.py

Lines changed: 13 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -36,8 +36,7 @@ def gen_random_state():
3636

3737

3838
def advi(vars=None, start=None, model=None, n=5000, accurate_elbo=False,
39-
optimizer=None, learning_rate=.001, epsilon=.1, random_seed=None,
40-
verbose=1):
39+
optimizer=None, learning_rate=.001, epsilon=.1, random_seed=None):
4140
"""Perform automatic differentiation variational inference (ADVI).
4241
4342
This function implements the meanfield ADVI, where the variational
@@ -142,29 +141,21 @@ def advi(vars=None, start=None, model=None, n=5000, accurate_elbo=False,
142141
# Optimization loop
143142
elbos = np.empty(n)
144143
try:
145-
for i in range(n):
144+
progress = trange(n)
145+
for i in progress:
146146
uw_i, e = f()
147147
elbos[i] = e
148-
if verbose and not i % (n // 10):
149-
if not i:
150-
print('Iteration {0} [{1}%]: ELBO = {2}'.format(
151-
i, 100 * i // n, e.round(2)))
152-
else:
153-
avg_elbo = elbos[i - n // 10:i].mean()
154-
print('Iteration {0} [{1}%]: Average ELBO = {2}'.format(
155-
i, 100 * i // n, avg_elbo.round(2)))
148+
if i % (n // 10) == 0 and i > 0:
149+
avg_elbo = elbos[i - n // 10:i].mean()
150+
progress.set_description('Average ELBO = {:,.2f}'.format(avg_elbo))
156151
except KeyboardInterrupt:
157-
if verbose:
158-
elbos = elbos[:i]
159-
avg_elbo = elbos[i - n // 10:].mean()
160-
print('Interrupted at {0} [{1}%]: Average ELBO = {2}'.format(
161-
i, 100 * i // n, avg_elbo.round(2)))
152+
elbos = elbos[:i]
153+
avg_elbo = elbos[i - n // 10:].mean()
154+
pm._log.info('Interrupted at {:,d} [{:.0f}%]: Average ELBO = {:,.2f}'.format(
155+
i, 100 * i // n, avg_elbo))
162156
else:
163-
if verbose:
164-
avg_elbo = elbos[-n // 10:].mean()
165-
print(
166-
'Finished [100%]: Average ELBO = {}'.format(avg_elbo.round(2))
167-
)
157+
avg_elbo = elbos[-n // 10:].mean()
158+
pm._log.info('Finished [100%]: Average ELBO = {:,.2f}'.format(avg_elbo))
168159

169160
# Estimated parameters
170161
l = int(uw_i.size / 2)
@@ -360,7 +351,7 @@ def rvs(x):
360351

361352
range_ = trange(draws) if progressbar else range(draws)
362353

363-
for i in range_:
354+
for _ in range_:
364355
# 'point' is like {'var1': np.array(0.1), 'var2': np.array(0.2), ...}
365356
point = {varname: value for varname, value in zip(varnames, f())}
366357
trace.record(point)

pymc3/variational/advi_minibatch.py

Lines changed: 17 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -4,11 +4,11 @@
44
import theano
55
import theano.tensor as tt
66
from theano.sandbox.rng_mrg import MRG_RandomStreams
7+
import tqdm
78

8-
from pymc3 import modelcontext, ArrayOrdering, DictToArrayBijection
9+
import pymc3 as pm
910
from pymc3.theanof import reshape_t, inputvars
10-
from .advi import check_discrete_rvs, ADVIFit, adagrad_optimizer, \
11-
gen_random_state
11+
from .advi import check_discrete_rvs, ADVIFit, adagrad_optimizer, gen_random_state
1212

1313
__all__ = ['advi_minibatch']
1414

@@ -68,7 +68,7 @@ def _get_rvss(
6868

6969
def _init_uw_global_shared(start, global_RVs, global_order):
7070
start = {v.name: start[v.name] for v in global_RVs}
71-
bij = DictToArrayBijection(global_order, start)
71+
bij = pm.DictToArrayBijection(global_order, start)
7272
u_start = bij.map(start)
7373
w_start = np.zeros_like(u_start)
7474
uw_start = np.concatenate([u_start, w_start]).astype(floatX_str)
@@ -207,8 +207,7 @@ def advi_minibatch(vars=None, start=None, model=None, n=5000, n_mcsamples=1,
207207
minibatch_RVs=None, minibatch_tensors=None,
208208
minibatches=None, local_RVs=None, observed_RVs=None,
209209
encoder_params=[], total_size=None, optimizer=None,
210-
learning_rate=.001, epsilon=.1, random_seed=None,
211-
verbose=1):
210+
learning_rate=.001, epsilon=.1, random_seed=None):
212211
"""Perform mini-batch ADVI.
213212
214213
This function implements a mini-batch ADVI with the meanfield
@@ -261,12 +260,12 @@ def advi_minibatch(vars=None, start=None, model=None, n=5000, n_mcsamples=1,
261260
model : Model
262261
Probabilistic model.
263262
n : int
264-
Number of interations updating parameters.
263+
Number of iterations updating parameters.
265264
n_mcsamples : int
266265
Number of Monte Carlo samples to approximate ELBO.
267266
minibatch_RVs : list of ObservedRVs
268267
Random variables in the model for which mini-batch tensors are set.
269-
When this argument is given, both of arguments local_RVs and
268+
When this argument is given, both of arguments local_RVs and
270269
observed_RVs must be None.
271270
minibatch_tensors : list of (tensors or shared variables)
272271
Tensors used to create ObservedRVs in minibatch_RVs.
@@ -306,7 +305,7 @@ def advi_minibatch(vars=None, start=None, model=None, n=5000, n_mcsamples=1,
306305
"""
307306
theano.config.compute_test_value = 'ignore'
308307

309-
model = modelcontext(model)
308+
model = pm.modelcontext(model)
310309
vars = inputvars(vars if vars is not None else model.vars)
311310
start = start if start is not None else model.test_point
312311
check_discrete_rvs(vars)
@@ -335,8 +334,8 @@ def get_transformed(v):
335334
global_RVs = list(set(vars) - set(list(local_RVs) + list(observed_RVs)))
336335

337336
# Ordering for concatenation of random variables
338-
global_order = ArrayOrdering([v for v in global_RVs])
339-
local_order = ArrayOrdering([v for v in local_RVs])
337+
global_order = pm.ArrayOrdering([v for v in global_RVs])
338+
local_order = pm.ArrayOrdering([v for v in local_RVs])
340339

341340
# ELBO wrt variational parameters
342341
inarray_g, uw_g, replace_g = _join_global_RVs(global_RVs, global_order)
@@ -387,20 +386,15 @@ def is_shared(t):
387386

388387
# Optimization loop
389388
elbos = np.empty(n)
390-
for i in range(n):
389+
progress = tqdm.trange(n)
390+
for i in progress:
391391
e = f(*next(minibatches))
392392
elbos[i] = e
393-
if verbose and not i % (n // 10):
394-
if not i:
395-
print('Iteration {0} [{1}%]: ELBO = {2}'.format(
396-
i, 100 * i // n, e.round(2)))
397-
else:
398-
avg_elbo = elbos[i - n // 10:i].mean()
399-
print('Iteration {0} [{1}%]: Average ELBO = {2}'.format(
400-
i, 100 * i // n, avg_elbo.round(2)))
401-
402-
if verbose:
403-
print('Finished [100%]: ELBO = {}'.format(elbos[-1].round(2)))
393+
if i % (n // 10) == 0 and i > 0:
394+
avg_elbo = elbos[i - n // 10:i].mean()
395+
progress.set_description('Average ELBO = {:,.2f}'.format(avg_elbo))
396+
397+
pm._log.info('Finished minibatch ADVI: ELBO = {:,.2f}'.format(elbos[-1]))
404398

405399
# Variational parameters of global RVs
406400
if 0 < len(global_RVs):

0 commit comments

Comments
 (0)