From 12a95867c52fb90c28c30fbcb6e9603a5d534fa7 Mon Sep 17 00:00:00 2001
From: jessegrabowski
Date: Wed, 13 Nov 2024 23:18:54 +0800
Subject: [PATCH] add docstring for transform_adapter.py

---
 python/nutpie/transform_adapter.py | 36 ++++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+)

diff --git a/python/nutpie/transform_adapter.py b/python/nutpie/transform_adapter.py
index e0e601b..c0674e5 100644
--- a/python/nutpie/transform_adapter.py
+++ b/python/nutpie/transform_adapter.py
@@ -12,6 +12,42 @@ def make_transform_adapter(
     untransformed_dim=None,
     zero_init=True,
 ):
+    """
+    Configure the normalizing flow used for transform adaptation.
+
+    Parameters
+    ----------
+    verbose: bool, default False
+        If True, print loss information to the console. Warning: this gets spammy fast with multiple chains.
+    window_size: int
+
+    show_progress: bool, default False
+        Show a progress bar for each SGD optimization run.
+    nn_depth: int, default 1
+        Number of hidden layers in each normalizing flow layer.
+    nn_width: int, optional
+        Width of the neural network. Trades computational cost during optimization for better normalizing
+        flows. Can lead to problems if the number of parameters grows too large -- you may need to
+        increase the number of tuning steps to fit them.
+
+        If None, this is set to half the number of parameters estimated in the posterior.
+    num_layers: int, default 8
+        Number of normalizing flow layers. More layers give more freedom in the functional
+        approximation, but also more numerical instability.
+    num_diag_windows
+    learning_rate: float, default 1e-3
+        Learning rate for the Adam optimizer used to fit the normalizing flow.
+    scale_layer: bool, default False
+        Experimental, currently unused.
+    untransformed_dim: int, optional
+        Number of posterior parameters that are passed through unchanged in each flow layer.
+    zero_init: bool, default True
+        Experimental. If True, initialize the network neurons with small random values.
+
+    Returns
+    -------
+
+    """
     import traceback
     from functools import partial
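
Usage sketch (reviewer note, not part of the patch): the snippet below shows how the parameters documented above might be passed to make_transform_adapter. The call itself uses only the module and keywords that appear in this diff; the commented-out sampler wiring (compile_pymc_model, with_transform_adapt, transform_adapt) is an assumption about nutpie's surrounding API and may differ between versions.

    from nutpie.transform_adapter import make_transform_adapter

    # Build an adapter using only keywords documented in the docstring above.
    adapter = make_transform_adapter(
        verbose=False,        # keep per-chain loss messages off
        show_progress=False,  # no SGD progress bars
        nn_depth=1,           # hidden layers per flow layer
        nn_width=None,        # None -> half the posterior dimension
        num_layers=8,         # number of flow layers
        learning_rate=1e-3,   # Adam step size
        untransformed_dim=None,
        zero_init=True,
    )

    # Assumed wiring into a sampling run -- method and keyword names are
    # illustrative and may not match every nutpie version:
    #
    #     compiled = nutpie.compile_pymc_model(model, backend="jax")
    #     trace = nutpie.sample(compiled.with_transform_adapt(), transform_adapt=True)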