Skip to content

Commit 6c2effe

Browse files
committed
Run: pre-commit run --all-files
1 parent a95b334 commit 6c2effe

File tree

2 files changed

+15
-8
lines changed

2 files changed

+15
-8
lines changed

examples/variational_inference/bayesian_neural_network_advi.ipynb

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -204,11 +204,11 @@
204204
" \"train_cols\": np.arange(X_train.shape[1]),\n",
205205
" \"obs_id\": np.arange(X_train.shape[0]),\n",
206206
" }\n",
207-
" \n",
207+
"\n",
208208
" with pm.Model(coords=coords) as neural_network:\n",
209209
" # Define minibatch variables\n",
210210
" minibatch_x, minibatch_y = pm.Minibatch(X_train, Y_train, batch_size=50)\n",
211-
" \n",
211+
"\n",
212212
" # Define data variables using minibatches\n",
213213
" ann_input = pm.Data(\"ann_input\", minibatch_x, mutable=True, dims=(\"obs_id\", \"train_cols\"))\n",
214214
" ann_output = pm.Data(\"ann_output\", minibatch_y, mutable=True, dims=\"obs_id\")\n",
@@ -241,8 +241,9 @@
241241
" )\n",
242242
" return neural_network\n",
243243
"\n",
244+
"\n",
244245
"# Create the neural network model\n",
245-
"neural_network = construct_nn()\n"
246+
"neural_network = construct_nn()"
246247
]
247248
},
248249
{

examples/variational_inference/bayesian_neural_network_advi.myst.md

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -114,7 +114,7 @@ A neural network is quite simple. The basic unit is a [perceptron](https://en.wi
114114
jupyter:
115115
outputs_hidden: true
116116
---
117-
def construct_nn(ann_input, ann_output):
117+
def construct_nn():
118118
n_hidden = 5
119119
120120
# Initialize random weights between each layer
@@ -128,9 +128,14 @@ def construct_nn(ann_input, ann_output):
128128
"train_cols": np.arange(X_train.shape[1]),
129129
"obs_id": np.arange(X_train.shape[0]),
130130
}
131+
131132
with pm.Model(coords=coords) as neural_network:
132-
ann_input = pm.Data("ann_input", X_train, dims=("obs_id", "train_cols"))
133-
ann_output = pm.Data("ann_output", Y_train, dims="obs_id")
133+
# Define minibatch variables
134+
minibatch_x, minibatch_y = pm.Minibatch(X_train, Y_train, batch_size=50)
135+
136+
# Define data variables using minibatches
137+
ann_input = pm.Data("ann_input", minibatch_x, mutable=True, dims=("obs_id", "train_cols"))
138+
ann_output = pm.Data("ann_output", minibatch_y, mutable=True, dims="obs_id")
134139
135140
# Weights from input to hidden layer
136141
weights_in_1 = pm.Normal(
@@ -155,13 +160,14 @@ def construct_nn(ann_input, ann_output):
155160
"out",
156161
act_out,
157162
observed=ann_output,
158-
total_size=Y_train.shape[0], # IMPORTANT for minibatches
163+
total_size=X_train.shape[0], # IMPORTANT for minibatches
159164
dims="obs_id",
160165
)
161166
return neural_network
162167
163168
164-
neural_network = construct_nn(X_train, Y_train)
169+
# Create the neural network model
170+
neural_network = construct_nn()
165171
```
166172

167173
That's not so bad. The `Normal` priors help regularize the weights. Usually we would add a constant `b` to the inputs, but I omitted it here to keep the code cleaner.

0 commit comments

Comments (0)