3 changes: 3 additions & 0 deletions .gitignore
@@ -1,3 +1,6 @@
*.json
*.nwb
*-checkpoint.ipynb
.local/
.ipython/
.jupyter/
47 changes: 46 additions & 1 deletion README.md
@@ -1 +1,46 @@
Example tutorials that we have developed for trainings and courses.
# Brain Observatory Examples
Example Jupyter notebooks for the Allen Brain Observatory

This repository contains 3 sets of notebooks:

- `tutorial` contains a set of notebooks that provide a self-guided tour of
accessing the Brain Observatory through the AllenSDK. It is expected that
users will explore these notebooks in order.
- `workshops` contains notebooks used for technical workshops run by the Allen
Institute. These are kept as a snapshot and record of the materials presented
at each workshop.
- `gallery` contains example notebooks that demonstrate an analysis or
visualization using the Brain Observatory and AllenSDK. Each of these notebooks
stands alone. If a notebook doesn't belong in the main tutorial and isn't from
a technical workshop, it probably goes here.

```
brain_observatory_gallery
├─ tutorial/ # Official tutorial for the Allen Brain Observatory
│ ├─ requirements.txt # packages required to run the tutorial
│ ├─ runtime.txt # Python version required to run the tutorial
│ ├─ Index.ipynb
│ ├─ 1. Intro to Brain Observatory.ipynb
│ └─ ...
├─ gallery/ # standalone analysis & visualization examples
│ ├─ requirements.txt # packages required to run the gallery notebooks
│ ├─ runtime.txt
│ ├─ joy_plots.ipynb
│ └─ ...
└─ workshops/ # technical workshops
├─ 2018 Workshop Example/ # Example workshop
│ ├─ requirements.txt
│ ├─ runtime.txt
│ ├─ 1. Intro to Brain Observatory.ipynb
│ └─ ...
├─ 2018 Other Example/
└─ ...
```


## Running the examples

Each folder contains the assets needed to build a Docker image for its notebooks using repo2docker:

1. Install Docker
2. Install repo2docker: `pip install repo2docker` (you may want to install from source)
3. Build the image and launch a notebook server: `repo2docker ./`
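
For example, running `repo2docker ./tutorial` should build an image from the `requirements.txt` and `runtime.txt` in the tutorial folder alone, then print a local URL for the Jupyter server it starts.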
2 changes: 2 additions & 0 deletions binder/requirements.txt
@@ -0,0 +1,2 @@
-r ../gallery/requirements.txt
-r ../tutorial/requirements.txt
1 change: 1 addition & 0 deletions binder/runtime.txt
@@ -0,0 +1 @@
python-2.7
5 changes: 5 additions & 0 deletions binder/start
@@ -0,0 +1,5 @@
#!/bin/bash
export BRAIN_OBSERVATORY_MANIFEST=~/brain_observatory_data/manifest.json

exec "$@"

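The `start` script exports `BRAIN_OBSERVATORY_MANIFEST` before handing control to the notebook server. A minimal sketch of how a notebook could pick that variable up (the environment lookup and fallback filename are assumptions; the gallery notebook in this diff still hard-codes its manifest path):

```python
import os

from allensdk.core.brain_observatory_cache import BrainObservatoryCache

# Prefer the manifest location exported by binder/start; fall back to a local
# file (the fallback name is illustrative, not part of this repository).
manifest_path = os.environ.get('BRAIN_OBSERVATORY_MANIFEST', 'manifest.json')
boc = BrainObservatoryCache(manifest_file=manifest_path)
```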
224 changes: 224 additions & 0 deletions gallery/decoding_from_sweep_response.ipynb
@@ -0,0 +1,224 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"## requirements\n",
"# allensdk\n",
"# scikit-learn > 0.19\n",
"# xarray"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/local1/miniconda2/envs/jk/lib/python2.7/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
" from ._conv import register_converters as _register_converters\n"
]
}
],
"source": [
"from allensdk.core.brain_observatory_cache import BrainObservatoryCache"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"boc = BrainObservatoryCache(manifest_file='/local1/data/boc/manifest.json',)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"oeid = 541206592\n",
"\n",
"# Initializations:\n",
"nwb_dataset = boc.get_ophys_experiment_data(oeid)\n"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import xarray as xr\n",
"\n",
"from allensdk.brain_observatory.natural_scenes import NaturalScenes\n",
"\n",
"def get_ns_msr(nwb_dataset):\n",
" ns = NaturalScenes(nwb_dataset)\n",
" mean_sweep_response = ns.mean_sweep_response.copy()\n",
" \n",
" # I don't know what dx is. goodbye!\n",
" mean_sweep_response.drop('dx',axis=1,inplace=True)\n",
" \n",
" # annotate the dataframe with useful indices and columns\n",
" time = pd.Series(\n",
" ns.timestamps[ns.stim_table['start']],\n",
" name='time',\n",
" )\n",
" neurons = pd.Series(\n",
" ns.cell_id,\n",
" name='neuron',\n",
" )\n",
" mean_sweep_response.set_index(time,inplace=True)\n",
" mean_sweep_response.columns = neurons\n",
" \n",
" images = ns.stim_table\n",
" \n",
" return images, mean_sweep_response"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" frame start end\n",
"0 92 16125 16132\n",
"1 27 16133 16140\n",
"2 52 16140 16147\n",
"3 37 16148 16155\n",
"4 103 16155 16162\n",
"neuron 541510267 541510270 541510307 541510405 588381938 541510410 \\\n",
"time \n",
"545.19658 2.750398 3.113332 3.283231 1.035660 2.312769 1.005320 \n",
"545.46195 5.472741 4.520462 1.848134 1.509070 3.900594 2.375818 \n",
"545.69416 4.938696 1.872071 0.822514 -0.366550 0.590227 0.577107 \n",
"545.95953 0.686303 -1.502568 -2.128904 -0.779033 -3.098761 0.632175 \n",
"546.19174 -2.763241 -2.317277 -1.518564 -1.131271 -3.716857 0.189942 \n",
"\n",
"neuron 541511183 541510394 588381886 541511196 ... 588381999 \\\n",
"time ... \n",
"545.19658 1.795959 1.302906 1.109441 2.644591 ... 3.245388 \n",
"545.46195 0.627758 0.452645 -1.946570 -0.088816 ... 5.051270 \n",
"545.69416 -0.431362 3.239566 -2.953792 0.519713 ... 0.117338 \n",
"545.95953 2.272473 4.147108 -4.235024 -0.116484 ... -3.436971 \n",
"546.19174 1.049816 0.004382 -3.942320 -2.070535 ... -3.320092 \n",
"\n",
"neuron 541510679 541509977 541510142 541509981 541509952 541510950 \\\n",
"time \n",
"545.19658 1.204343 3.223873 -0.614828 1.594057 2.538169 9.322724 \n",
"545.46195 3.157544 2.147983 3.462305 1.155841 3.599893 10.790494 \n",
"545.69416 2.204452 0.433436 1.692538 4.946897 -2.181524 1.229501 \n",
"545.95953 1.661621 -1.256703 0.234285 5.248888 -1.481765 -3.012385 \n",
"546.19174 0.768980 0.713630 4.319497 1.107409 0.044746 -4.763652 \n",
"\n",
"neuron 541511172 541509957 541511118 \n",
"time \n",
"545.19658 3.246232 17.838305 44.883263 \n",
"545.46195 3.700325 41.864319 55.052734 \n",
"545.69416 1.446858 19.081219 -0.731454 \n",
"545.95953 0.283391 -6.771274 -18.988461 \n",
"546.19174 -0.807627 -15.687251 -24.453199 \n",
"\n",
"[5 rows x 154 columns]\n"
]
}
],
"source": [
"y, X = get_ns_msr(nwb_dataset)\n",
"print y.head()\n",
"print X.head()"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.linear_model import LogisticRegression\n",
"from sklearn.model_selection import train_test_split"
]
},
{
"cell_type": "code",
"execution_count": 34,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"def decode(msrx):\n",
" \n",
" # get features and output\n",
" X = msrx.data\n",
" y = msrx['natural_image']\n",
" \n",
" # split training & testing\n",
" X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,stratify=y)\n",
" \n",
" # do the classification\n",
" lm = LogisticRegression(\n",
" solver='saga',\n",
" multi_class='ovr',\n",
" penalty='l1',\n",
" n_jobs=-1,\n",
" )\n",
" lm.fit(X_train,y_train)\n",
" return lm.score(X_test,y_test) * len(np.unique(y))"
]
},
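{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# The cell below calls decode(msrx), but msrx is never defined in the original\n",
"# notebook. This cell is an assumed reconstruction (not part of the original\n",
"# diff): it wraps the mean sweep responses in an xarray DataArray and attaches\n",
"# the natural-scene frame ids as the 'natural_image' coordinate that decode()\n",
"# expects.\n",
"import xarray as xr  # already imported above; repeated so this cell stands alone\n",
"\n",
"msrx = xr.DataArray(\n",
" X.values,\n",
" dims=['time','neuron'],\n",
" coords={\n",
" 'time': X.index,\n",
" 'neuron': X.columns,\n",
" 'natural_image': ('time', y['frame'].values),\n",
" },\n",
")"
]
},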
{
"cell_type": "code",
"execution_count": 35,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"19.9\n",
"1 loop, best of 1: 50.1 s per loop\n"
]
}
],
"source": [
"%%timeit -n 1 -r 1\n",
"print decode(msrx)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python [conda env:allensdk]",
"language": "python",
"name": "conda-env-allensdk-py"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.15"
}
},
"nbformat": 4,
"nbformat_minor": 2
}