@@ -78,22 +78,6 @@ array([[1., 1., 1., 1.],
JaxArray([[1., 1., 1., 1.],
          [0., 0., 0., 0.]], dtype=float32)

-# mathematical functions
->>> np.sin(np_arr)
-array([[0.84147098, 0.84147098, 0.84147098, 0.84147098],
-       [0.        , 0.        , 0.        , 0.        ]])
->>> bm.sin(bm_arr)
-JaxArray([[0.84147096, 0.84147096, 0.84147096, 0.84147096],
-          [0.        , 0.        , 0.        , 0.        ]], dtype=float32)
-
-# linear algebra
->>> np.dot(np_arr, np.ones((4, 2)))
-array([[4., 4.],
-       [0., 0.]])
->>> bm.dot(bm_arr, bm.ones((4, 2)))
-JaxArray([[4., 4.],
-          [0., 0.]], dtype=float32)
-
# random number generation
>>> np.random.uniform(-0.1, 0.1, (2, 3))
array([[-0.02773637,  0.03766689, -0.01363128],
@@ -118,9 +102,6 @@ def lorenz_system(x, y, z, t):
  dy = x * (rho - z) - y
  dz = x * y - beta * z
  return dx, dy, dz
-
-runner = bp.integrators.IntegratorRunner(lorenz_system, dt=0.01)
-runner.run(100.)
```


@@ -129,41 +110,45 @@ Numerical methods for stochastic differential equations (SDEs).

```python
sigma = 10; beta = 8/3; rho = 28
-p = 0.1

def lorenz_noise(x, y, z, t):
-  return p * x, p * y, p * z
+  return 0.1 * x, 0.1 * y, 0.1 * z

@bp.sdeint(method='milstein', g=lorenz_noise)
def lorenz_system(x, y, z, t):
  dx = sigma * (y - x)
  dy = x * (rho - z) - y
  dz = x * y - beta * z
  return dx, dy, dz
-
-runner = bp.integrators.IntegratorRunner(lorenz_system, dt=0.01)
-runner.run(100.)
```



-Numerical methods for delay differential equations (SDEs).
+Numerical methods for delay differential equations (DDEs).

```python
xdelay = bm.TimeDelay(bm.zeros(1), delay_len=1., before_t0=1., dt=0.01)

-
@bp.ddeint(method='rk4', state_delays={'x': xdelay})
def second_order_eq(x, y, t):
  dx = y
  dy = -y - 2 * x - 0.5 * xdelay(t - 1)
  return dx, dy
+```


-runner = bp.integrators.IntegratorRunner(second_order_eq, dt=0.01)
-runner.run(100.)
-```
+Numerical methods for fractional differential equations (FDEs).

+```python
+sigma = 10; beta = 8/3; rho = 28
+
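+# alpha is the fractional order; 'GLShortMemory' is the Grünwald-Letnikov scheme with the short-memory principle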
+@bp.fdeint(method='GLShortMemory', alpha=0.97)
+def fractional_lorenz(x, y, z, t):
+  dx = sigma * (y - x)
+  dy = x * (rho - z) - y
+  dz = x * y - beta * z
+  return dx, dy, dz
+```


### 3. Dynamics simulation level
@@ -184,36 +169,23 @@ class EINet(bp.dyn.Network):
    I2I = bp.dyn.ExpCOBA(I, I, bp.conn.FixedProb(prob=0.02), E=-80., g_max=6.7, tau=10.)

    super(EINet, self).__init__(E2E, E2I, I2E, I2I, E=E, I=I)
-
-
-net = EINet()
-runner = bp.dyn.DSRunner(net)
-runner(100.)
```

-Simulating a whole brain network by using rate models.
+Simulating a whole-brain network by using rate models.

```python
-import numpy as np
-
class WholeBrainNet(bp.dyn.Network):
-  def __init__(self, signal_speed=20.):
+  def __init__(self):
    super(WholeBrainNet, self).__init__()

-    self.fhn = bp.dyn.RateFHN(80, x_ou_sigma=0.01, y_ou_sigma=0.01, name='fhn')
-    self.syn = bp.dyn.DiffusiveDelayCoupling(self.fhn, self.fhn,
-                                             'x->input',
-                                             conn_mat=conn_mat,
-                                             delay_mat=delay_mat)
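+    # conn_mat and delay_mat: structural connectivity and delay matrices, assumed to be defined beforehand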
+    self.areas = bp.dyn.RateFHN(80, x_ou_sigma=0.01, y_ou_sigma=0.01, name='fhn')
+    self.conns = bp.dyn.DiffusiveDelayCoupling(self.areas, self.areas, 'x->input',
+                                               conn_mat=conn_mat,
+                                               delay_mat=delay_mat)

  def update(self, _t, _dt):
-    self.syn.update(_t, _dt)
-    self.fhn.update(_t, _dt)
-
-
-net = WholeBrainNet()
-runner = bp.dyn.DSRunner(net, monitors=['fhn.x'], inputs=['fhn.input', 0.72])
-runner.run(6e3)
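+    # update the delayed coupling first, then the node dynamics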
+    self.conns.update(_t, _dt)
+    self.areas.update(_t, _dt)
```


@@ -272,23 +244,31 @@ trainer = bp.nn.BPTT(net,
Analyzing a low-dimensional FitzHugh–Nagumo neuron model.

```python
-bp.math.enable_x64()
-
model = bp.dyn.FHN(1)
-analyzer = bp.analysis.PhasePlane2D(model,
-                                    target_vars={'V': [-3, 3], 'w': [-3., 3.]},
-                                    pars_update={'I_ext': 0.8},
-                                    resolutions=0.01)
+
+analyzer = bp.analysis.PhasePlane2D(
+  model,
+  target_vars={'V': [-3, 3], 'w': [-3., 3.]},
+  pars_update={'I_ext': 0.8},
+  resolutions=0.01
+)
analyzer.plot_nullcline()
analyzer.plot_vector_field()
analyzer.plot_fixed_point()
-analyzer.plot_trajectory({'V': [-2.8], 'w': [-1.8]}, duration=100.)
analyzer.show_figure()
```

-<p align="center"><img src="./docs/_static/fhn_ppa.png" width="60%">
-</p>
+Analyzing a high-dimensional continuous-attractor neural network (CANN).

+```python
+cann_model = CANN(100)  # your high-dimensional CANN network
+
+finder = bp.analysis.SlowPointFinder(f_cell=cann_model)
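+# optimize 1000 random candidate states (each of dimension 100) toward fixed points via gradient descent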
+finder.find_fps_with_gd_method(candidates=bm.random.random((1000, 100)))
+finder.filter_loss(tolerance=1e-5)
+finder.keep_unique(tolerance=0.03)
+finder.exclude_outliers(0.1)
+```


### 6. More others