Skip to content

Commit 684f537

Browse files
authored
Merge branch 'NeuralEnsemble:master' into assert-errors
2 parents a586421 + b29cfa9 commit 684f537

25 files changed

+348
-257
lines changed

.gitignore

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -76,4 +76,8 @@ doc/*.plx
7676
doc/*.nev
7777
doc/*.ns5
7878
doc/*.nix
79-
doc/*.nwb
79+
doc/*.nwb
80+
*.plx
81+
*.smr
82+
B95.zip
83+
grouped_ephys

doc/source/conf.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -252,6 +252,7 @@
252252
)
253253

254254
sphinx_gallery_conf = {
255+
# 'only_warn_on_example_error': True, # helps with debugging broken examples
255256
"examples_dirs": "../../examples", # path to your example scripts
256257
"gallery_dirs": "examples", # path to where to save gallery generated output
257258
}

doc/source/contributing.rst

Lines changed: 24 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -208,6 +208,28 @@ The first time this is run, all of the Neo test files will be downloaded to your
208208
so the run time can be an hour or more.
209209
For subsequent runs, the files are already there, so the tests will run much faster.
210210

211+
Because Neo downloads datasets this can lead to issues in the course of offline development or
212+
for packaging Neo (e.g. for a Linux distribution). In order to not download datasets and to skip
213+
tests which require downloaded datasets the environment variable :code:`NEO_TESTS_NO_NETWORK` can
214+
be set to any truthy value (e.g. :code:`'True'`).
215+
216+
For macOS/Linux this can be done by doing:
217+
218+
.. code-block:: bash
219+
220+
NEO_TESTS_NO_NETWORK='True' pytest .
221+
222+
For Windows this can be done by doing:
223+
224+
.. code-block:: bat
225+
226+
set NEO_TESTS_NO_NETWORK=true
227+
228+
pytest .
229+
230+
This can also be done with a conda environment variable if developing in a conda env. To configure these
231+
see the docs at `conda env vars documentation`_.
232+
211233
It is often helpful to run only parts of the test suite. To test only the :mod:`neo.core` module,
212234
which is much quicker than testing :mod:`neo.io`, run::
213235

@@ -465,4 +487,5 @@ Making a release
465487
.. _PyPI: https://pypi.org/project/neo
466488
.. _`continuous integration server`: https://github.com/NeuralEnsemble/python-neo/actions
467489
.. _`Read the Docs`: https://neo.readthedocs.io/en/latest/
468-
.. _`docs configuration page`: https://readthedocs.org/projects/neo/
490+
.. _`docs configuration page`: https://readthedocs.org/projects/neo/
491+
.. _`conda env vars documentation`: https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#activating-an-environment

examples/generated_data.py

Lines changed: 0 additions & 128 deletions
This file was deleted.
Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,3 +29,5 @@
2929
plt.plot(signal.times, signal)
3030
plt.xlabel(signal.sampling_period.dimensionality)
3131
plt.ylabel(signal.dimensionality)
32+
33+
plt.show()
Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -24,9 +24,9 @@
2424
image_seq = ImageSequence(l, sampling_rate=500 * pq.Hz, spatial_scale="m", units="V")
2525

2626
result = image_seq.signal_from_region(
27-
CircularRegionOfInterest(50, 50, 25),
28-
CircularRegionOfInterest(10, 10, 5),
29-
PolygonRegionOfInterest((50, 25), (50, 45), (14, 65), (90, 80)),
27+
CircularRegionOfInterest(image_seq,50, 50, 25),
28+
CircularRegionOfInterest(image_seq, 10, 10, 5),
29+
PolygonRegionOfInterest(image_seq,(50, 25), (50, 45), (14, 65), (90, 80)),
3030
)
3131

3232
for i in range(len(result)):
Lines changed: 95 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,95 @@
1+
"""
2+
Example for usecases.rst
3+
"""
4+
5+
from itertools import cycle
6+
import numpy as np
7+
from quantities import ms, mV, kHz
8+
import matplotlib.pyplot as plt
9+
from neo import Block, Segment, ChannelView, Group, SpikeTrain, AnalogSignal
10+
11+
store_signals = False
12+
13+
block = Block(name="probe data", tetrode_ids=["Tetrode #1", "Tetrode #2"])
14+
block.segments = [
15+
Segment(name="trial #1", index=0),
16+
Segment(name="trial #2", index=1),
17+
Segment(name="trial #3", index=2),
18+
]
19+
20+
n_units = {"Tetrode #1": 2, "Tetrode #2": 5}
21+
22+
# Create a group for each neuron, annotate each group with the tetrode from which it was recorded
23+
groups = []
24+
counter = 0
25+
for tetrode_id, n in n_units.items():
26+
groups.extend([Group(name=f"neuron #{counter + i + 1}", tetrode_id=tetrode_id) for i in range(n)])
27+
counter += n
28+
block.groups.extend(groups)
29+
30+
iter_group = cycle(groups)
31+
32+
# Create dummy data, one segment at a time
33+
for segment in block.segments:
34+
35+
# create two 4-channel AnalogSignals with dummy data
36+
signals = {
37+
"Tetrode #1": AnalogSignal(np.random.rand(1000, 4) * mV, sampling_rate=10 * kHz, tetrode_id="Tetrode #1"),
38+
"Tetrode #2": AnalogSignal(np.random.rand(1000, 4) * mV, sampling_rate=10 * kHz, tetrode_id="Tetrode #2"),
39+
}
40+
if store_signals:
41+
segment.analogsignals.extend(signals.values())
42+
43+
# create spike trains with dummy data
44+
# we will pretend the spikes have been extracted from the dummy signal
45+
for tetrode_id in ("Tetrode #1", "Tetrode #2"):
46+
for i in range(n_units[tetrode_id]):
47+
spiketrain = SpikeTrain(np.random.uniform(0, 100, size=30) * ms, t_stop=100 * ms)
48+
# assign each spiketrain to the appropriate segment
49+
segment.spiketrains.append(spiketrain)
50+
# assign each spiketrain to a given neuron
51+
current_group = next(iter_group)
52+
current_group.add(spiketrain)
53+
if store_signals:
54+
# add to the group a reference to the signal from which the spikes were obtained
55+
# this does not give a 1:1 correspondence between spike trains and signals,
56+
# for that we could use additional groups (and have groups of groups)
57+
current_group.add(signals[tetrode_id])
58+
59+
60+
# Now plot the data
61+
62+
# .. by trial
63+
plt.figure()
64+
for seg in block.segments:
65+
print(f"Analyzing segment {seg.index}")
66+
stlist = [st - st.t_start for st in seg.spiketrains]
67+
plt.subplot(len(block.segments), 1, seg.index + 1)
68+
count, bins = np.histogram(stlist)
69+
plt.bar(bins[:-1], count, width=bins[1] - bins[0])
70+
plt.title(f"PSTH in segment {seg.index}")
71+
plt.show()
72+
73+
# .. by neuron
74+
75+
plt.figure()
76+
for i, group in enumerate(block.groups):
77+
stlist = [st - st.t_start for st in group.spiketrains]
78+
plt.subplot(len(block.groups), 1, i + 1)
79+
count, bins = np.histogram(stlist)
80+
plt.bar(bins[:-1], count, width=bins[1] - bins[0])
81+
plt.title(f"PSTH of unit {group.name}")
82+
plt.show()
83+
84+
# ..by tetrode
85+
86+
plt.figure()
87+
for i, tetrode_id in enumerate(block.annotations["tetrode_ids"]):
88+
stlist = []
89+
for unit in block.filter(objects=Group, tetrode_id=tetrode_id):
90+
stlist.extend([st - st.t_start for st in unit.spiketrains])
91+
plt.subplot(2, 1, i + 1)
92+
count, bins = np.histogram(stlist)
93+
plt.bar(bins[:-1], count, width=bins[1] - bins[0])
94+
plt.title(f"PSTH blend of tetrode {tetrode_id}")
95+
plt.show()
File renamed without changes.

0 commit comments

Comments
 (0)