|
25 | 25 | "outputs": [], |
26 | 26 | "source": [ |
27 | 27 | "import os\n", |
28 | | - "os.environ[\"AESARA_FLAGS\"] = \"floatX=float64\"" |
| 28 | + "os.environ[\"PYTENSOR_FLAGS\"] = \"floatX=float64\"" |
29 | 29 | ] |
30 | 30 | }, |
31 | 31 | { |
|
48 | 48 | "from scipy.integrate import odeint\n", |
49 | 49 | "\n", |
50 | 50 | "import pymc as pm\n", |
51 | | - "import aesara\n", |
52 | | - "import aesara.tensor as aet\n", |
| 51 | + "import pytensor\n", |
| 52 | + "import pytensor.tensor as pt\n", |
53 | 53 | "\n", |
54 | 54 | "# this notebook show DEBUG log messages\n", |
55 | 55 | "# logging.getLogger('pymc').setLevel(logging.DEBUG)\n", |
|
150 | 150 | "outputs": [], |
151 | 151 | "source": [ |
152 | 152 | "# To demonstrate that test-value computation works, but also for debugging\n", |
153 | | - "aesara.config.compute_test_value = 'raise'\n", |
154 | | - "aesara.config.exception_verbosity = 'high'\n", |
155 | | - "aesara.config.traceback.limit = 5" |
| 153 | + "pytensor.config.compute_test_value = 'raise'\n", |
| 154 | + "pytensor.config.exception_verbosity = 'high'\n", |
| 155 | + "pytensor.config.traceback.limit = 5" |
156 | 156 | ] |
157 | 157 | }, |
158 | 158 | { |
|
176 | 176 | "metadata": {}, |
177 | 177 | "outputs": [], |
178 | 178 | "source": [ |
179 | | - "import sunode.wrappers.as_aesara\n", |
| 179 | + "import sunode.wrappers.as_pytensor\n", |
180 | 180 | "\n", |
181 | 181 | "\n", |
182 | 182 | "def get_model_sunode():\n", |
|
187 | 187 | " s0 = pm.Normal('red_0', mu=10, sigma=2)\n", |
188 | 188 | " extra = pm.Normal('extra', shape=n_extra)\n", |
189 | 189 | "\n", |
190 | | - " y_hat, _, _ = sunode.wrappers.as_aesara.solve_ivp(\n", |
| 190 | + " y_hat, _, _ = sunode.wrappers.as_pytensor.solve_ivp(\n", |
191 | 191 | " y0={\n", |
192 | 192 | " 'S': (s0, ()), # TODO Infer shape from model?\n", |
193 | 193 | " 'P': np.array(y0_true[1], dtype='d'),\n", |
|
196 | 196 | " params={\n", |
197 | 197 | " 'K_S': (K_S, ()),\n", |
198 | 198 | " 'vmax': (vmax, ()),\n", |
199 | | - " 'tmp': np.zeros(1), # TODO aesara wants at least one fixed param\n", |
| 199 | + " 'tmp': np.zeros(1), # TODO pytensor wants at least one fixed param\n", |
200 | 200 | " 'extra_p': (extra, (n_extra,))\n", |
201 | 201 | " },\n", |
202 | 202 | " rhs=reaction_sympy,\n", |
|
235 | 235 | " \n", |
236 | 236 | " extra = pm.Normal('extra', shape=n_extra)\n", |
237 | 237 | "\n", |
238 | | - " y_hat, problem, _ = sunode.wrappers.as_aesara.solve_ivp(\n", |
| 238 | + " y_hat, problem, _ = sunode.wrappers.as_pytensor.solve_ivp(\n", |
239 | 239 | " y0={\n", |
240 | 240 | " 'S': (s0, ()), # TODO Infer shape from model?\n", |
241 | 241 | " 'P': np.array(y0_true[1], dtype='d'),\n", |
|
244 | 244 | " params={\n", |
245 | 245 | " 'K_S': (K_S, ()),\n", |
246 | 246 | " 'vmax': (vmax, ()),\n", |
247 | | - " 'tmp': np.zeros(1), # TODO aesara wants at least one fixed param\n", |
| 247 | + " 'tmp': np.zeros(1), # TODO pytensor wants at least one fixed param\n", |
248 | 248 | " 'extra_p': (extra, (n_extra,))\n", |
249 | 249 | " },\n", |
250 | 250 | " rhs=reaction_sympy,\n", |
|
305 | 305 | " \n", |
306 | 306 | " # create a test function for evaluating the logp value\n", |
307 | 307 | " print('Compiling f_logpt')\n", |
308 | | - " f_logpt = aesara.function(\n", |
| 308 | + " f_logpt = pytensor.function(\n", |
309 | 309 | " inputs=t_inputs,\n", |
310 | 310 | " outputs=[pmodel.logpt],\n", |
311 | 311 | " # with float32, allow downcast because the forward integration is always float64\n", |
312 | | - " allow_input_downcast=(aesara.config.floatX == 'float32')\n", |
| 312 | + " allow_input_downcast=(pytensor.config.floatX == 'float32')\n", |
313 | 313 | " )\n", |
314 | 314 | " print(f'Test logpt:')\n", |
315 | 315 | " print(f_logpt(*test_inputs))\n", |
316 | 316 | " \n", |
317 | 317 | " # and another test function for evaluating the gradient\n", |
318 | 318 | " print('Compiling f_logpt')\n", |
319 | | - " f_grad = aesara.function(\n", |
| 319 | + " f_grad = pytensor.function(\n", |
320 | 320 | " inputs=t_inputs,\n", |
321 | | - " outputs=aet.grad(pmodel.logpt, t_inputs),\n", |
| 321 | + " outputs=pt.grad(pmodel.logpt, t_inputs),\n", |
322 | 322 | " # with float32, allow downcast because the forward integration is always float64\n", |
323 | | - " allow_input_downcast=(aesara.config.floatX == 'float32')\n", |
| 323 | + " allow_input_downcast=(pytensor.config.floatX == 'float32')\n", |
324 | 324 | " )\n", |
325 | 325 | " print(f'Test gradient:')\n", |
326 | 326 | " print(f_grad(*test_inputs))\n", |
|
984 | 984 | "metadata": {}, |
985 | 985 | "outputs": [], |
986 | 986 | "source": [ |
987 | | - "aesara.printing.pydotprint(aet.grad(model_sunode.logpt, model_sunode.vmax), 'ODE_API_shapes_and_benchmarking.png')\n", |
| 987 | + "pytensor.printing.pydotprint(pt.grad(model_sunode.logpt, model_sunode.vmax), 'ODE_API_shapes_and_benchmarking.png')\n", |
988 | 988 | "IPython.display.Image('ODE_API_shapes_and_benchmarking.png')" |
989 | 989 | ] |
990 | 990 | }, |
|
1007 | 1007 | "metadata": {}, |
1008 | 1008 | "outputs": [], |
1009 | 1009 | "source": [ |
1010 | | - "from aesara import d3viz\n", |
| 1010 | + "from pytensor import d3viz\n", |
1011 | 1011 | "d3viz.d3viz(model.logpt, 'ODE_API_shapes_and_benchmarking.html')" |
1012 | 1012 | ] |
1013 | 1013 | }, |
|
1041 | 1041 | "nbformat": 4, |
1042 | 1042 | "nbformat_minor": 4 |
1043 | 1043 | } |
1044 | | - |
0 commit comments