31 commits
7a5f7a0
Merge branch 'develop' into feature/repo_updates_via_api
pgleeson Jan 27, 2025
13bbd8f
Generated all biomodels
pgleeson Jan 27, 2025
dc7cde2
Tweak info script
pgleeson Jan 27, 2025
3cd14fa
Added 2 modeldb to osb
pgleeson Jan 27, 2025
542e9cd
Adding many new modeldb repos
pgleeson Jan 27, 2025
2b8d663
Initial version of working biomodels loader
pgleeson Jan 27, 2025
a7bd102
Just caching curated biomodels models
pgleeson Jan 29, 2025
a39c5ee
More added for biomodels to v2dev
pgleeson Jan 30, 2025
4ce9cb3
Added all remaining biomodels models to v2dev - 1070 total
pgleeson Jan 30, 2025
c09ec67
First 500 biomodels added to live
pgleeson Feb 3, 2025
694b250
All remaining biomodels added
pgleeson Feb 3, 2025
4c58750
Latest cache
pgleeson Feb 26, 2025
555dd0f
Fix notebook base images
filippomc Apr 2, 2025
e1e9652
Fix jupyterlab to use python 3.11
filippomc Apr 2, 2025
e18ef7d
Update jupyterlab image
filippomc Apr 4, 2025
abbc574
Latest caches
pgleeson Apr 11, 2025
23687f9
Merge pull request #959 from OpenSourceBrain/jupyterhub-update
pgleeson Apr 11, 2025
76bfe8e
osb_gh cache modified
pgleeson Apr 11, 2025
79034e3
More biomodels cache
pgleeson Apr 11, 2025
5fd64c1
More modeldb cache
pgleeson Apr 11, 2025
94f170f
Merge branch 'feature/docker-jupyterhub-pg' into feature/repo_updates…
pgleeson Apr 11, 2025
7e3bb8a
Regenerated and added a readme
pgleeson Apr 14, 2025
c78b029
Updated modeldb info
pgleeson Apr 14, 2025
a70d476
Updated biomodels cache
pgleeson Apr 16, 2025
ea7f58f
Update osb gh cache
pgleeson May 7, 2025
004829a
Moved 5 biomodels repos & added 1 new
pgleeson May 7, 2025
8a10b38
Adding 23 new biomodels to v2dev
pgleeson May 7, 2025
f4c1918
Added 24 new biomodels
pgleeson May 7, 2025
69b7a3f
Updates to cache
pgleeson Jul 25, 2025
b1fe272
Improved readme
pgleeson Jul 25, 2025
2239a25
Latest gh & v2/v2dev repos
pgleeson Jul 25, 2025
2 changes: 1 addition & 1 deletion applications/jupyterhub/deploy/templates/configmap.yaml
@@ -3,7 +3,7 @@ apiVersion: v1
metadata:
name: "jupyterhub-notebook-config"
labels:
-    app: jupytehub
+    app: jupyterhub
data:
{{- (.Files.Glob "resources/jupyterhub/applications/*").AsConfig | nindent 2 }}
---
2 changes: 1 addition & 1 deletion applications/jupyterlab-minimal/Dockerfile
@@ -1,4 +1,4 @@
-FROM jupyter/base-notebook:hub-1.5.0
+FROM quay.io/jupyter/base-notebook:hub-4.0.2

USER root
RUN apt-get update && apt-get install git -y
14 changes: 7 additions & 7 deletions applications/jupyterlab/Dockerfile
@@ -1,4 +1,4 @@
-FROM quay.io/jupyter/base-notebook:latest
+FROM quay.io/jupyter/base-notebook:hub-4.0.2
USER root


@@ -35,12 +35,12 @@ USER root
RUN echo -e '\n\nalias cd..="cd .." \nalias h=history \nalias ll="ls -alt" \n' >> ~/.bashrc

### Set up jnml, reusing pynml jar
-RUN echo -e '#!/bin/bash\n#Reusing the jNeuroML jar from the pip installed pyNeuroML for the jnml command\n\njava -classpath /opt/conda/lib/python3.12/site-packages/pyneuroml/lib/jNeuroML-*-jar-with-dependencies.jar org.neuroml.JNeuroML $@' >> /opt/conda/bin/jnml
+RUN echo -e '#!/bin/bash\n#Reusing the jNeuroML jar from the pip installed pyNeuroML for the jnml command\n\njava -classpath /opt/conda/lib/python3.11/site-packages/pyneuroml/lib/jNeuroML-*-jar-with-dependencies.jar org.neuroml.JNeuroML $@' >> /opt/conda/bin/jnml
RUN chmod +x /opt/conda/bin/jnml
ENV JNML_HOME=/opt/conda/bin

### Set up lems, reusing pynml jar
-RUN echo -e '#!/bin/bash\n#Reusing the jNeuroML jar from the pip installed pyNeuroML for the lems command\n\njava -classpath /opt/conda/lib/python3.12/site-packages/pyneuroml/lib/jNeuroML-*-jar-with-dependencies.jar org.lemsml.jlems.viz.VizMain $@' >> /opt/conda/bin/lems
+RUN echo -e '#!/bin/bash\n#Reusing the jNeuroML jar from the pip installed pyNeuroML for the lems command\n\njava -classpath /opt/conda/lib/python3.11/site-packages/pyneuroml/lib/jNeuroML-*-jar-with-dependencies.jar org.lemsml.jlems.viz.VizMain $@' >> /opt/conda/bin/lems
RUN chmod +x /opt/conda/bin/lems

RUN cat ~/.bashrc
@@ -93,10 +93,10 @@ RUN cd /tmp && \
cd nest && \
mkdir $NEST_HOME && \
apt-get install libgsl-dev -y && \
-    cmake -DCMAKE_INSTALL_PREFIX:PATH=$NEST_HOME -DPYTHON_EXECUTABLE:FILEPATH=/opt/conda/bin/python -DPYTHON_INCLUDE_DIR=/opt/conda/include/python3.12 . && \
+    cmake -DCMAKE_INSTALL_PREFIX:PATH=$NEST_HOME -DPYTHON_EXECUTABLE:FILEPATH=/opt/conda/bin/python -DPYTHON_INCLUDE_DIR=/opt/conda/include/python3.11 . && \
make -j7 && \
make install
-ENV PYTHONPATH=$NEST_HOME/lib/python3.12/site-packages
+ENV PYTHONPATH=$NEST_HOME/lib/python3.11/site-packages
ENV PATH=$PATH:$NEST_HOME/bin

USER jovyan
@@ -106,7 +106,7 @@ RUN pip install backports.tarfile>=1.2 # temp fix for error: ImportError: cannot
RUN pip install -r requirements.txt --upgrade --no-cache-dir

# Compile NEURON mod files for PyNN
-RUN cd /opt/conda/lib/python3.12/site-packages/pyNN/neuron/nmodl && nrnivmodl
+RUN cd /opt/conda/lib/python3.11/site-packages/pyNN/neuron/nmodl && nrnivmodl

# Install NeuroML Schemas etc.
RUN git clone https://github.com/NeuroML/NeuroML2
@@ -121,7 +121,7 @@ ENV PATH=$PATH:$XPP_HOME

#########################################################################
# fix for https://github.com/jupyter/notebook/issues/7048
-RUN pip install traitlets==5.9.0
+# RUN pip install traitlets==5.9.0

COPY --chown=jovyan:users overrides/* /opt/conda/share/jupyter/lab/static/
WORKDIR /opt/workspace
53 changes: 20 additions & 33 deletions applications/jupyterlab/overrides/index.html
@@ -1,18 +1,9 @@
-<!doctype html>
-<html lang="en">
-
-<head><meta charset="utf-8"><title>JupyterLab</title><meta name="viewport" content="width=device-width,initial-scale=1">{# Copy so we do not modify the page_config with updates. #} {% set page_config_full = page_config.copy() %} {# Set a dummy variable - we just want the side effect of the update. #} {% set _ = page_config_full.update(baseUrl=base_url, wsUrl=ws_url) %}<script id="jupyter-config-data" type="application/json">{{ page_config_full | tojson }}</script>{% block favicon %}<link rel="icon" type="image/x-icon" href="{{ base_url | escape }}static/favicons/favicon.ico" class="idle favicon"><link rel="" type="image/x-icon" href="{{ base_url | escape }}static/favicons/favicon-busy-1.ico" class="busy favicon">{% endblock %} {% if custom_css %}<link rel="stylesheet" href="{{ base_url | escape }}custom/custom.css">{% endif %}<script defer="defer" src="{{page_config.fullStaticUrl}}/main.ea7277c9c015ae398545.js?v=ea7277c9c015ae398545"></script></head>
-
-
-
-
-<body class="jp-ThemedContainer">
-<script>/* Remove token from URL. */
+<!doctype html><html lang="en"><head><meta charset="utf-8"><title>JupyterLab</title><meta name="viewport" content="width=device-width,initial-scale=1">{# Copy so we do not modify the page_config with updates. #} {% set page_config_full = page_config.copy() %} {# Set a dummy variable - we just want the side effect of the update. #} {% set _ = page_config_full.update(baseUrl=base_url, wsUrl=ws_url) %}<script id="jupyter-config-data" type="application/json">{{ page_config_full | tojson }}</script>{% block favicon %}<link rel="icon" type="image/x-icon" href="{{ base_url | escape }}static/favicons/favicon.ico" class="idle favicon"><link rel="" type="image/x-icon" href="{{ base_url | escape }}static/favicons/favicon-busy-1.ico" class="busy favicon">{% endblock %} {% if custom_css %}<link rel="stylesheet" href="{{ base_url | escape }}custom/custom.css">{% endif %}<script defer="defer" src="{{page_config.fullStaticUrl}}/main.29f1d48bf941e6484236.js?v=29f1d48bf941e6484236"></script></head><body><script>/* Remove token from URL. */
 (function () {
-  var location = window.location;
-  var search = location.search;
+    var location = window.location;
+    var search = location.search;

-  /**** CUSTOM OSB LOAD RESOURCE ****/
+    /**** CUSTOM OSB LOAD RESOURCE ****/
 if (window !== window.parent && !window.location.href.includes("lab/tree")) {
 window.parent.postMessage({ type: "APP_READY" }, "*");
 }
@@ -38,27 +29,23 @@

 /**** END CUSTOM OSB LOAD RESOURCE ****/

-  // If there is no query string, bail.
-  if (search.length <= 1) {
-    return;
-  }
-
-  // Rebuild the query string without the `token`.
-  var query = '?' + search.slice(1).split('&')
-    .filter(function (param) { return param.split('=')[0] !== 'token'; })
-    .join('&');
-
-  // Rebuild the URL with the new query string.
-  var url = location.origin + location.pathname +
-    (query !== '?' ? query : '') + location.hash;
-
-  if (url === location.href) {
-    return;
-  }
-
-  window.history.replaceState({}, '', url);
-
-})();</script>
-</body>
-
-</html>
+    // If there is no query string, bail.
+    if (search.length <= 1) {
+        return;
+    }
+
+    // Rebuild the query string without the `token`.
+    var query = '?' + search.slice(1).split('&')
+        .filter(function (param) { return param.split('=')[0] !== 'token'; })
+        .join('&');
+
+    // Rebuild the URL with the new query string.
+    var url = location.origin + location.pathname +
+        (query !== '?' ? query : '') + location.hash;
+
+    if (url === location.href) {
+        return;
+    }
+
+    window.history.replaceState({ }, '', url);
+})();</script></body></html>
4 changes: 2 additions & 2 deletions applications/netpyne/Dockerfile
@@ -14,12 +14,12 @@ COPY $APP_DIR/webapp .
RUN yarn build-dev

### Download on a separate stage to run in parallel with buildkit
-FROM quay.io/jupyter/base-notebook:latest as downloads
+FROM quay.io/jupyter/base-notebook:hub-4.0.2 as downloads
USER root
RUN wget --no-check-certificate -O /nyhead.mat https://www.parralab.org/nyhead/sa_nyhead.mat

###
-FROM quay.io/jupyter/base-notebook:latest
+FROM quay.io/jupyter/base-notebook:hub-4.0.2
ARG APP_DIR=dependencies/NetPyNE-UI
ENV NB_UID=jovyan
ENV FOLDER=netpyne
2 changes: 1 addition & 1 deletion applications/nwb-explorer/Dockerfile
@@ -14,7 +14,7 @@ RUN yarn build


###
-FROM quay.io/jupyter/base-notebook:latest
+FROM quay.io/jupyter/base-notebook:hub-4.0.2
ENV NB_UID=jovyan
ENV FOLDER=nwb-explorer
USER root
2 changes: 1 addition & 1 deletion applications/nwb-explorer/dependencies/nwb-explorer
Submodule nwb-explorer updated from ff44c2 to b0839b
51 changes: 51 additions & 0 deletions libraries/client/README.md
@@ -0,0 +1,51 @@
# Scripts for getting lists of repositories on OSBv2 and associated databases

These scripts can be used to get a cached list of the current repositories on OSBv2 and v2dev, and to keep those lists up to date with the contents of DANDI, ModelDB, BioModels, etc.

0) A GitHub access token must be created so that scripts using the Python `github` package (PyGithub) can be run.

Save the token in `github.auth` locally.
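
For reference, a minimal sketch of how a script might read this token and authenticate with PyGithub is shown below; the `github.auth` filename follows the step above, but the reading logic is an assumption, not necessarily what these scripts do:

```
# Minimal sketch: authenticate with PyGithub using the token in github.auth.
# Assumes the file contains just the token on one line (an assumption).
from github import Github

with open("github.auth") as f:
    token = f.read().strip()

gh = Github(token)
print(gh.get_user().login)  # quick sanity check that the token works
```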

1) Update all current cached lists:

```
./info_all.sh -q # Runs a quick check of contents of OSBv1, OSBv2, OSBv2dev, OSB repos on Github & DANDI Archive.
./info_all.sh # Same as above, but with BioModels & ModelDB
```

Contents of these caches will be saved in JSON files in `cached_info/`

2) Check/update OSBv1 projects

The following command will regenerate the cached list of current OSBv1 projects using the OSBv1 API:

```
python osbv1_info.py
```

It saves the list to `cached_info/projects_v1.json`.
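
For illustration, the overall shape of such a caching script is sketched below; the endpoint URL and response structure are assumptions, not the actual OSBv1 API contract:

```
# Sketch of the caching pattern; endpoint and response shape are assumptions.
import json
import requests

API_URL = "https://www.opensourcebrain.org/api/projects"  # hypothetical endpoint

response = requests.get(API_URL)
response.raise_for_status()
projects = response.json()

with open("cached_info/projects_v1.json", "w") as fp:
    json.dump(projects, fp, indent=2)

print("Cached %i projects" % len(projects))
```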

If a new project has recently been created on OSBv1 (and so the JSON cache has changed) but has not yet been added to v2/v2dev, run:

```
python loadosbv1.py -v2dev -dry # this does a dry run and prints info on which projects/repos it still needs to add
```

Get an access token by logging in to http://v2dev.opensourcebrain.org, opening the Web Developer console, loading a page, and copying the access token from a network request (e.g. abcxxx123). Use this token to add the repos via the API:

```
python loadosbv1.py abcxxx123 -v2dev # add new repos

python osb_info.py -v2dev # regenerate cached list of all OSBv2 repos
```

Then do the same using `-v2` instead of `-v2dev` for the live version of OSBv2.

3) Check/update the cached info for OSB projects on GitHub

This will generate a cached list of all repositories under https://github.com/opensourcebrain into `cached_info/osb_gh.json`. Note: most (~2K) of these are forks of ModelDB GitHub repos; many of the rest are repos that were used on OSBv1.

```
python osb_gh_info.py
```
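
Conceptually, listing the organization's repositories with PyGithub looks something like the sketch below; the fields cached and the output structure are assumptions for illustration, not necessarily what `osb_gh_info.py` stores:

```
# Sketch: list repos under the OpenSourceBrain org and cache basic info.
# Field selection and output structure are assumptions for illustration.
import json
from github import Github

with open("github.auth") as f:
    gh = Github(f.read().strip())

org = gh.get_organization("OpenSourceBrain")
repos = {}
for repo in org.get_repos():
    repos[repo.name] = {
        "url": repo.html_url,
        "default_branch": repo.default_branch,
        "fork": repo.fork,
    }

with open("cached_info/osb_gh.json", "w") as fp:
    json.dump(repos, fp, indent=2)

print("Cached %i repositories" % len(repos))
```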

105 changes: 40 additions & 65 deletions libraries/client/biomodels_info.py
@@ -1,23 +1,38 @@
"""
-Script to get Biomodels project info
+Script to get BioModels project info
"""

import requests
import json
import pprint
-from loadbiomodels import get_model_identifiers, get_model_info

-verbose = True  #
+verbose = False

info_all = {}

+API_URL: str = "https://www.ebi.ac.uk/biomodels"
+out_format = "json"
+
+
+def get_model_identifiers():
+    response = requests.get(API_URL + "/model/identifiers?format=" + out_format)
+    response.raise_for_status()
+    output = response.json()
+    return output
+
+
+def get_model_info(model_id):
+    response = requests.get(API_URL + "/" + model_id + "?format=" + out_format)
+    response.raise_for_status()
+    output = response.json()
+    return output


if __name__ == "__main__":
min_index = 0
-    max_index = 20
-    index = 0
-
-    from loadbiomodels import get_model_identifiers
+    max_index = 10000
+    index = 1

model_ids = get_model_identifiers()["models"]

@@ -29,73 +44,33 @@
% (index, len(selection), index + min_index, model_id)
)

-        model_link = f"[{model_id}](https://www.ebi.ac.uk/biomodels/{model_id})"
-        info = get_model_info(model_id)
-        model_name = info["name"]
-        print(f"    {model_id}: \n    {pprint.pformat(info['name'])}--")
-
-        info_all[model_id] = info
-        """
-        son.loads(get_page('https://modeldb.science/api/v1/models/%s'%model))
-
-        print('  %s'%info[model]['name'])
-        if 'gitrepo' in info[model] and info[model]['gitrepo']:
-            with_gitrepo+=1
-            print('    gitrepo: %s'%info[model]['gitrepo'])
-        else:
-            print('    gitrepo: %s'%False)
-
-        expected_forks = 0
-        possible_mdb_repo = 'ModelDBRepository/%s'%(info[model]['id'])
-        try:
-            mdb_repo = gh.get_repo(possible_mdb_repo)
-
-            repo_to_use = mdb_repo
-            print('  Exists at: %s (def branch: %s; forks: %i)'%(mdb_repo.html_url, mdb_repo.default_branch, mdb_repo.forks))
-
-            possible_osbgh_repo = 'OpenSourceBrain/%s'%(info[model]['id'])
-            try:
-                osb_repo = gh.get_repo(possible_osbgh_repo)
-                msg = '  Exists at: %s (def branch: %s; forks: %i), order %i'%(osb_repo.html_url, osb_repo.default_branch, osb_repo.forks, index+min_index)
-                on_osbv2.append(msg)
-                print(msg)
-                repo_to_use = osb_repo
-                expected_forks+=1
-
-                info[model]['osbv2_gh_repo'] = repo_to_use.html_url
-                info[model]['osbv2_gh_branch'] = repo_to_use.default_branch
-            except:
-                print('  Missing fork: %s, forking now: %s'%(possible_osbgh_repo, fork_if_missing))
-                if fork_if_missing:
-                    print('  Forking to: %s...'%possible_osbgh_repo)
-                    org = gh.get_organization('OpenSourceBrain')
-                    org.create_fork(mdb_repo,default_branch_only=False)
-                    msg = '  Forked to: %s...'%possible_osbgh_repo
-                    print(msg)
-                    forked_now.append(msg)
-
-                else:
-                    msg = '  Yet to be forked: %i, order %i; %s'%(info[model]['id'], index+min_index,info[model]['name'])
-                    print(msg)
-                    to_be_forked.append(msg)
-
-
-            if (not mdb_repo.forks==expected_forks) and (not (info[model]['id'] in known_to_have_other_forks)):
-                msg = '  Unexpected forks for %i (%s != %s)...'%(info[model]['id'], mdb_repo.forks,expected_forks)
-                print(msg)
-                many_forks.append(msg)
-
-        except:
-            msg = '  Problem locating repo for: %i (%i/%i) %s'%(info[model]['id'],index, len(selection), info[model]['name'])
-            print(msg)
-            errors.append(msg)"""
+        model_url = f"https://www.ebi.ac.uk/biomodels/{model_id}"
+        model_link = f"[{model_id}]({model_url})"
+        try:
+            info = get_model_info(model_id)
+            if info["curationStatus"] != "CURATED":
+                print(
+                    "  **** Not adding, as curationStatus = %s"
+                    % info["curationStatus"]
+                )
+            else:
+                model_name = info["name"]
+                print(f"    {model_id}: \n    {pprint.pformat(info['name'])}--")
+
+                info_all[model_id] = info
+        except Exception as e:
+            msg = f"Error retrieving model at {model_url}: {e}"
+
+            print("  ******* %s" % msg)
+
+            info_all[model_id] = {"error": msg}

index += 1

if verbose:
infop = pprint.pprint(info_all, compact=True)

print("\nThere were %i models checked\n" % (len(info)))
print("\nThere were %i models checked\n" % (len(info_all)))

filename = "cached_info/biomodels.json"

@@ -104,4 +79,4 @@
fp.write(strj)


print("Data on Biomodels (%i models) written to %s" % (len(info), filename))
print("Data on Biomodels (%i models) written to %s" % (len(info_all), filename))