
Commit 86c7af4

committed
Merge branch 'experimental' into nml_examples
2 parents ee83477 + b3aa648 commit 86c7af4


133 files changed: +217812 -217764 lines changed


.github/workflows/ci.yml

Lines changed: 113 additions & 117 deletions
@@ -5,137 +5,133 @@ on:
   pull_request:
   push:
   release:
-    types:
-      - published
+    types: [published]
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
 
 jobs:
   pre-commit:
     name: Format
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v4
-    - uses: actions/setup-python@v5
-      with:
-        python-version: "3.10"
-    - uses: pre-commit/[email protected]
-      with:
-        extra_args: --hook-stage manual --all-files
-
-  checks:
-    name: Check Python ${{ matrix.python-version }} on ${{ matrix.runs-on }}
-    runs-on: ${{ matrix.runs-on }}
+    - uses: actions/checkout@v4
+    - uses: actions/setup-python@v5
+      with:
+        python-version: '3.10'
+    - uses: pre-commit/[email protected]
+      with:
+        extra_args: --hook-stage manual --all-files
+
+  tests:
+    name: ${{ matrix.test-suite }}, py ${{ matrix.python-version }}, ${{ matrix.os }}
+    runs-on: ${{ matrix.os }}
     strategy:
       fail-fast: false
       matrix:
-        python-version: [ "3.9", "3.11", "3.12"]
-        runs-on: [ubuntu-latest, macos-latest, windows-latest]
+        python-version: ['3.10', '3.13']
+        os: [ubuntu-latest, macos-latest, windows-latest]
+        test-suite: [core, actr, pytorch, neuroml, tensorflow, psyneulink]
         exclude:
-          - runs-on: macos-latest
-            python-version: "3.9"
+          - os: windows-latest
+            test-suite: tensorflow
 
     steps:
-    - uses: actions/checkout@v4
-
-    - uses: actions/setup-python@v5
-      with:
-        python-version: ${{ matrix.python-version }}
-
-    # Rely on version of modelspec from PyPI as set by setup.cfg...
-    #- name: Install modelspec
-    #  run: python -m pip install git+https://github.com/ModECI/modelspec.git@main
-
-    # Rely on version of NeuroMLlite from PyPI as set by setup.py...
-    # - name: Install NeuroMLlite
-    #   run: python -m pip install NeuroMLlite>=0.5.0
-
-    # Rely on version of PsyNeuLink from PyPI as set by setup.py...
-    #- name: Install specific PsyNeuLink branch
-    #  run: python -m pip install git+https://github.com/ModECI/PsyNeuLink@devel
-
-    - name: Install HDF5 for pytables on macos-14/latest
-      if: ${{ matrix.runs-on == 'macos-latest' }}
-      run: |
-        brew install hdf5
-
-    - name: Install core package
-      run: |
-        python -m pip install .[dev]
-
-    - name: Version info for installed packages
-      run: |
-        pip list
-
-    - name: Test core package
-      run: |
-        python -m pytest -m coremdf tests/
-
-    - name: Install most optional dependencies
-      run: |
-        python -m pip install .[optional]
-
-    - name: Version info for optional installed packages
-      run: |
-        pip list
-
-    - name: Install graphviz
-      uses: ts-graphviz/setup-graphviz@v2
-      with:
-        # Skip to run brew update command on macOS.
-        macos-skip-brew-update: 'true' # default false
-
-    - name: Test interface ACT-R
-      run: |
-        python -m pytest -v -m "actr" tests/
-
-    - name: Test interface PyTorch
-      run: |
-        python -m pytest -v -m "pytorch" tests/
-
-    - name: Test interface NeuroML
-      run: |
-        python -m pip install .[neuroml]
-        python -m pytest -v -m "neuroml" tests/
-
-    - name: Test interface TensorFlow
-      run: |
-        python -m pip install .[tensorflow]
-        dot -V
-        python -m pytest -v -m "tensorflow" tests/
-
-    - name: Test interface PsyNeuLink
-      run: |
-        python -m pip install .[psyneulink]
-        python -m pytest -v -m "psyneulink" tests/
-
-    - name: Build Documentation
-      run: |
-        cd docs
-        python generate.py
-        cd sphinx
-        make clean
-        make html
-
-    - name: Final version info for optional installed packages
-      run: |
-        pip list
+    - uses: actions/checkout@v4
+
+    - name: Setup Python
+      uses: actions/setup-python@v5
+      with:
+        python-version: ${{ matrix.python-version }}
+
+    - name: Install uv and set the python version
+      uses: astral-sh/setup-uv@v6
+      with:
+        python-version: ${{ matrix.python-version }}
+
+    - name: Install HDF5 for pytables on macos-14/latest
+      if: ${{ matrix.runs-on == 'macos-latest' }}
+      run: |
+        brew install hdf5
+
+    - name: Install graphviz
+      uses: ts-graphviz/setup-graphviz@v2
+      with:
+        # Skip to run brew update command on macOS.
+        macos-skip-brew-update: 'true' # default false
+
+    - name: Install extras for ${{ matrix.test-suite }}
+      shell: bash
+      run: |
+        uv sync --dev
+        case "${{ matrix.test-suite }}" in
+          pytorch) uv sync --extra "optional" --dev ;;
+          neuroml) uv sync --extra "neuroml" --dev ;;
+          tensorflow) uv sync --extra "tensorflow" --dev ;;
+          psyneulink) uv sync --extra "psyneulink" --dev ;;
+        esac
+
+    - name: Run ${{ matrix.test-suite }} tests
+      shell: bash
+      run: |
+        MARKER="${{ matrix.test-suite }}"
+        if [ "${{ matrix.test-suite }}" = "core" ]; then
+          MARKER="coremdf"
+        fi
+        uv run pytest -v -m "${MARKER}" tests/
+
+  docs:
+    name: Build Documentation
+    needs: tests
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v4
+    - uses: actions/setup-python@v5
+      with:
+        python-version: '3.10'
+    - name: Install uv and set the python version
+      uses: astral-sh/setup-uv@v6
+      with:
+        python-version: '3.10'
+    - name: Generate and Build Docs
+      run: |
+        uv sync --group docs
+        cd docs
+        uv run generate.py
+        cd sphinx
+        uv run make clean
+        uv run make html
 
   dist:
     name: Distribution build
     runs-on: ubuntu-latest
-
     steps:
-    - uses: actions/checkout@v4
-
-    - name: Build sdist and wheel
-      run: pipx run --spec build pyproject-build
-
-    - uses: actions/upload-artifact@v4
-      with:
-        path: dist
-
-    - uses: pypa/[email protected]
-      if: github.event_name == 'release' && github.event.action == 'published'
-      with:
-        user: __token__
-        # Remember to generate this and set it in "GitHub Secrets"
-        password: ${{ secrets.pypi_password }}
+    - uses: actions/checkout@v4
+    - name: Build sdist & wheel
+      run: pipx run --spec build pyproject-build
+    - uses: actions/upload-artifact@v4
+      with:
+        path: dist
+    - uses: pypa/[email protected]
+      if: github.event_name == 'release' && github.event.action == 'published'
+      with:
+        user: __token__
+        password: ${{ secrets.pypi_password }}
+
+  all-passed:
+    runs-on: ubuntu-latest
+    needs:
+      - pre-commit
+      - tests
+      - docs
+      - dist
+    if: always()
+    steps:
+    - name: Fail if any dependency did not succeed
+      run: |
+        echo "Upstream results: ${{ join(needs.*.result, ', ') }}"
+        # If any result is failure/cancelled/skipped, fail this job
+        if [[ "${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') || contains(needs.*.result, 'skipped') }}" == "true" ]]; then
+          exit 1
+        fi
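
For orientation, the marker selection in the new "Run ${{ matrix.test-suite }} tests" step maps the suite name to a pytest marker: the "core" suite runs the tests marked "coremdf", and every other suite name is used directly as its marker. A minimal Python sketch of that mapping (the `run_suite` helper and the example suite value are illustrative only, not part of the workflow):

```python
import pytest


def run_suite(test_suite: str) -> int:
    """Mirror of the shell logic above: pick the pytest marker for a suite."""
    marker = "coremdf" if test_suite == "core" else test_suite
    # Roughly equivalent to: uv run pytest -v -m "<marker>" tests/
    return pytest.main(["-v", "-m", marker, "tests/"])


if __name__ == "__main__":
    raise SystemExit(run_suite("core"))
```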

.github/workflows/ci_test_all.yml

Lines changed: 3 additions & 3 deletions
@@ -2,9 +2,9 @@ name: CI Test script
 
 on:
   push:
-    branches: [ main, development, experimental, test*, nml* ]
+    branches: [ main, development, experimental, test*, nml*, version* ]
   pull_request:
-    branches: [ main, development, experimental, test*, nml* ]
+    branches: [ main, development, experimental, test*, nml*, version* ]
 
 jobs:
 
@@ -14,7 +14,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: [ "3.10"]
+        python-version: [ "3.13"]
         runs-on: [ubuntu-latest]
 
     steps:

README.md

Lines changed: 7 additions & 0 deletions
@@ -13,6 +13,13 @@
 **Note: MDF is still in development! See the [open issues related to the specification](https://github.com/ModECI/MDF/issues?q=is%3Aissue+is%3Aopen+label%3Aspecification) or go [here](http://modeci.org/#contactPage) to get in contact regarding MDF.**
 *The MDF format was first proposed following a meeting organised at Princeton in July 2019 by Russ Poldrack of the Center for Reproducible Neuroscience (CRN) at Stanford and the [Brain Imaging Data Standard (BIDS)](https://bids.neuroimaging.io/) initiative. For more on the previous work in this area, see [here](https://github.com/OpenSourceBrain/PsyNeuLinkShowcase/tree/master/BIDS-MDF).*
 
+## Paper introducing MDF
+
+The background to the ModECI project, the motivation for developing the Model Description Format, and the initial Python implementation of the language have been described in a NeuroView article in the Neuron journal:
+
+*<b>Integrating model development across computational neuroscience, cognitive science, and machine learning</b>*
+Padraig Gleeson, Sharon Crook, David Turner, Katherine Mantel, Mayank Raunak, Ted Willke and Jonathan D. Cohen, April 25, 2023 DOI: [https://doi.org/10.1016/j.neuron.2023.03.037](https://doi.org/10.1016/j.neuron.2023.03.037)
+
 
 ## Overview
 

docs/MDF_function_specifications.json

Lines changed: 4 additions & 4 deletions
@@ -686,15 +686,15 @@
         "expression_string": "onnx_ops.lppool(X, auto_pad, kernel_shape, p, pads, strides)"
     },
     "onnx::MatMul": {
-        "description": "\nMatrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html\n",
+        "description": "\nMatrix product that behaves like [numpy.matmul](https://numpy.org/doc/stable/reference/generated/numpy.matmul.html).\n",
         "arguments": [
             "A",
             "B"
         ],
         "expression_string": "onnx_ops.matmul(A, B)"
     },
     "onnx::MatMulInteger": {
-        "description": "\nMatrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html.\nThe production MUST never overflow. The accumulation may overflow if and only if in 32 bits.\n",
+        "description": "\nMatrix product that behaves like [numpy.matmul](https://numpy.org/doc/stable/reference/generated/numpy.matmul.html).\nThe production MUST never overflow. The accumulation may overflow if and only if in 32 bits.\n",
         "arguments": [
             "A",
             "B",
@@ -711,7 +711,7 @@
         "expression_string": "onnx_ops.max(data_0)"
     },
     "onnx::MaxPool": {
-        "description": "\n MaxPool consumes an input tensor X and applies max pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n max pooling consisting of computing the max on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. The output spatial shape is calculated differently\n depending on whether explicit padding is used, where pads is employed, or auto padding is used, where auto_pad is utilized.\n With explicit padding (https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html?highlight=maxpool#torch.nn.MaxPool2d):\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1)\n ```\n or\n ```\n output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1)\n ```\n if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`. Sliding windows that would start in the right padded region are ignored.\n\n `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following when ceil_mode is enabled:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n or when ceil_mode is disabled (https://www.tensorflow.org/api_docs/python/tf/keras/layers/AveragePooling2D):\n ```\n VALID: output_spatial_shape[i] = floor((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i]) + 1\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = floor((input_spatial_shape[i] - 1) / strides_spatial_shape[i]) + 1\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]\n ```\n The output of each pooling window is maximum number of elements exclude pad. \n ",
+        "description": "\n MaxPool consumes an input tensor X and applies max pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n max pooling consisting of computing the max on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. The output spatial shape is calculated differently\n depending on whether explicit padding is used, where pads is employed, or auto padding is used, where auto_pad is utilized.\n With explicit padding (https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html?highlight=maxpool#torch.nn.MaxPool2d):\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1)\n ```\n or\n ```\n output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1)\n ```\n if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`.\n\n `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following when ceil_mode is enabled:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n or when ceil_mode is disabled (https://www.tensorflow.org/api_docs/python/tf/keras/layers/AveragePooling2D):\n ```\n VALID: output_spatial_shape[i] = floor((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i]) + 1\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = floor((input_spatial_shape[i] - 1) / strides_spatial_shape[i]) + 1\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]\n ```\n The output of each pooling window is maximum number of elements exclude pad. \n ",
         "arguments": [
             "X"
         ],
@@ -900,7 +900,7 @@
         "expression_string": "onnx_ops.qlinearconv(x, x_scale, x_zero_point, w, w_scale, w_zero_point, y_scale, y_zero_point, B, auto_pad, dilations, group, kernel_shape, pads, strides)"
     },
     "onnx::QLinearMatMul": {
-        "description": "\nMatrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html.\nIt consumes two quantized input tensors, their scales and zero points, scale and zero point of output,\nand computes the quantized output. The quantization formula is y = saturate((x / y_scale) + y_zero_point).\nFor (x / y_scale), it is rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details.\nScale and zero point must have same shape. They must be either scalar (per tensor) or N-D tensor\n(per row for 'a' and per column for 'b'). Scalar refers to per tensor quantization whereas N-D refers to per row\nor per column quantization. If the input is 2D of shape [M, K] then zero point and scale tensor may be\nan M element vector [v_1, v_2, ..., v_M] for per row quantization and K element vector of shape [v_1, v_2, ..., v_K]\nfor per column quantization. If the input is N-D tensor with shape [D1, D2, M, K] then zero point and scale tensor may\nhave shape [D1, D2, M, 1] for per row quantization and shape [D1, D2, 1, K] for per column quantization.\nProduction must never overflow, and accumulation may overflow if and only if in 32 bits.\n",
+        "description": "\nMatrix product that behaves like [numpy.matmul](https://numpy.org/doc/stable/reference/generated/numpy.matmul.html).\nIt consumes two quantized input tensors, their scales and zero points, scale and zero point of output,\nand computes the quantized output. The quantization formula is y = saturate((x / y_scale) + y_zero_point).\nFor (x / y_scale), it is rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details.\nScale and zero point must have same shape. They must be either scalar (per tensor) or N-D tensor\n(per row for 'a' and per column for 'b'). Scalar refers to per tensor quantization whereas N-D refers to per row\nor per column quantization. If the input is 2D of shape [M, K] then zero point and scale tensor may be\nan M element vector [v_1, v_2, ..., v_M] for per row quantization and K element vector of shape [v_1, v_2, ..., v_K]\nfor per column quantization. If the input is N-D tensor with shape [D1, D2, M, K] then zero point and scale tensor may\nhave shape [D1, D2, M, 1] for per row quantization and shape [D1, D2, 1, K] for per column quantization.\nProduction must never overflow, and accumulation may overflow if and only if in 32 bits.\n",
         "arguments": [
             "a",
             "a_scale",

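The MaxPool description above quotes the output-shape formulas for explicit padding. A small worked sketch of that arithmetic in Python (the helper name and the example numbers are illustrative only):

```python
import math


def maxpool_out_dim(in_dim, kernel, stride, pad_total, dilation=1, ceil_mode=False):
    """Output spatial size for one axis with explicit padding, per the formula
    quoted above; pad_total is pad_shape[i], i.e. the sum of both pads on axis i."""
    rounding = math.ceil if ceil_mode else math.floor
    return rounding((in_dim + pad_total - dilation * (kernel - 1) - 1) / stride + 1)


# Example: a 32-wide axis, kernel 3, stride 2, one pixel of padding per side
# (pad_total = 2), dilation 1:
print(maxpool_out_dim(32, 3, 2, 2))                  # floor(31/2 + 1) = 16
print(maxpool_out_dim(32, 3, 2, 2, ceil_mode=True))  # ceil(31/2 + 1)  = 17
```
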
docs/MDF_function_specifications.md

Lines changed: 4 additions & 4 deletions
@@ -1685,7 +1685,7 @@ Python version: `onnx_ops.lppool(X, auto_pad, kernel_shape, p, pads, strides)`
 
 ## MatMul
 <p><i>
-Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html
+Matrix product that behaves like [numpy.matmul](https://numpy.org/doc/stable/reference/generated/numpy.matmul.html).
 </i></p>
 
 Python version: `onnx_ops.matmul(A, B)`
@@ -1695,7 +1695,7 @@ Python version: `onnx_ops.matmul(A, B)`
 
 ## MatMulInteger
 <p><i>
-Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html.
+Matrix product that behaves like [numpy.matmul](https://numpy.org/doc/stable/reference/generated/numpy.matmul.html).
 The production MUST never overflow. The accumulation may overflow if and only if in 32 bits.
 </i></p>
 
@@ -1732,7 +1732,7 @@ Python version: `onnx_ops.max(data_0)`
 ```
 output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1)
 ```
-if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`. Sliding windows that would start in the right padded region are ignored.
+if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`.
 
 `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following when ceil_mode is enabled:
 ```
@@ -2237,7 +2237,7 @@ Python version: `onnx_ops.qlinearconv(x, x_scale, x_zero_point, w, w_scale, w_ze
 
 ## QLinearMatMul
 <p><i>
-Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html.
+Matrix product that behaves like [numpy.matmul](https://numpy.org/doc/stable/reference/generated/numpy.matmul.html).
 It consumes two quantized input tensors, their scales and zero points, scale and zero point of output,
 and computes the quantized output. The quantization formula is y = saturate((x / y_scale) + y_zero_point).
 For (x / y_scale), it is rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details.
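
The quantization formula quoted above, y = saturate((x / y_scale) + y_zero_point) with round-half-to-even for x / y_scale, can be sketched in NumPy as follows (the uint8 saturation range is assumed here for illustration; the actual range depends on the output zero-point type):

```python
import numpy as np


def quantize(x, y_scale, y_zero_point):
    """Sketch of y = saturate((x / y_scale) + y_zero_point); np.rint rounds
    halves to even, matching the "nearest ties to even" rule quoted above."""
    y = np.rint(x / y_scale) + y_zero_point
    return np.clip(y, 0, 255).astype(np.uint8)  # saturate (uint8 range assumed)


print(quantize(np.array([0.5, 1.5, 300.0]), y_scale=1.0, y_zero_point=0))  # [  0   2 255]
```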
